Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)

Commit b96157e32b — Merge branch 'master' into enable_atomic_database_by_default
@@ -14,6 +14,8 @@ TRIES=3
AMD64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_build_check/gcc-10_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"
AARCH64_BIN_URL="https://clickhouse-builds.s3.yandex.net/0/e29c4c3cc47ab2a6c4516486c1b77d57e7d42643/clickhouse_special_build_check/clang-10-aarch64_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"

# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.

FASTER_DOWNLOAD=wget
if command -v axel >/dev/null; then
FASTER_DOWNLOAD=axel
@@ -36,14 +38,6 @@ if [[ ! -f clickhouse ]]; then
$FASTER_DOWNLOAD "$AMD64_BIN_URL"
elif [[ $CPU == aarch64 ]]; then
$FASTER_DOWNLOAD "$AARCH64_BIN_URL"

# Download configs. ARM version has no embedded configs.
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/users.xml
mkdir config.d
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/path.xml -O config.d/path.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/access_control.xml -O config.d/access_control.xml
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/programs/server/config.d/log_to_console.xml -O config.d/log_to_console.xml
else
echo "Unsupported CPU type: $CPU"
exit 1
@@ -60,10 +54,12 @@ if [[ ! -d data ]]; then
if [[ ! -f $DATASET ]]; then
$FASTER_DOWNLOAD "https://clickhouse-datasets.s3.yandex.net/hits/partitions/$DATASET"
fi

tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET
fi

uptime

echo "Starting clickhouse-server"

./clickhouse server > server.log 2>&1 &
@@ -105,9 +101,12 @@ echo
echo "Benchmark complete. System info:"
echo

echo '----Version and build id--------'
./clickhouse local --query "SELECT version(), buildId()"
echo '----Version, build id-----------'
./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
echo '----CPU-------------------------'
cat /proc/cpuinfo | grep -i -F 'model name' | uniq
lscpu
echo '----Block Devices---------------'
lsblk
@@ -89,7 +89,8 @@ EOT
fi

if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG &
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
pid="$!"

# check if clickhouse is ready to accept connections
@@ -38,7 +38,7 @@ toc_title: Adopters
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
| <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
| <a href="https://www.ecwid.com/" class="favicon">Ecwid</a> | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | TBA | — | — | [Webinar, Sep 2020](https://altinity.com/webinarspage/2020/09/08/migrating-from-druid-to-next-gen-olap-on-clickhouse-ebays-experience) |
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
| <a href="https://www.exness.com" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
| <a href="https://fastnetmon.com/" class="favicon">FastNetMon</a> | DDoS Protection | Main Product | | — | [Official website](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/) |
| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
@@ -3,7 +3,7 @@ machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# In operators {#select-in-operators}
# IN operators {#select-in-operators}

The `IN`, `NOT IN`, `GLOBAL IN`, and `GLOBAL NOT IN` operators are covered separately, since their functionality is quite rich.
@@ -3,7 +3,7 @@ set(CLICKHOUSE_SERVER_SOURCES
Server.cpp
)

if (OS_LINUX AND ARCH_AMD64)
if (OS_LINUX)
set (LINK_CONFIG_LIB INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:clickhouse_server_configs> -Wl,${NO_WHOLE_ARCHIVE}")
endif ()
@@ -339,16 +339,23 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
if (hasLinuxCapability(CAP_IPC_LOCK))
{
/// Get the memory area with (current) code segment.
/// It's better to lock only the code segment instead of calling "mlockall",
/// because otherwise debug info will be also locked in memory, and it can be huge.
auto [addr, len] = getMappedArea(reinterpret_cast<void *>(mainEntryClickHouseServer));
try
{
/// Get the memory area with (current) code segment.
/// It's better to lock only the code segment instead of calling "mlockall",
/// because otherwise debug info will be also locked in memory, and it can be huge.
auto [addr, len] = getMappedArea(reinterpret_cast<void *>(mainEntryClickHouseServer));

LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlock(addr, len))
LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
LOG_TRACE(log, "Will do mlock to prevent executable memory from being paged out. It may take a few seconds.");
if (0 != mlock(addr, len))
LOG_WARNING(log, "Failed mlock: {}", errnoToString(ErrorCodes::SYSTEM_ERROR));
else
LOG_TRACE(log, "The memory map of clickhouse executable has been mlock'ed, total {}", ReadableSize(len));
}
catch (...)
{
LOG_WARNING(log, "Cannot mlock: {}", getCurrentExceptionMessage(false));
}
}
else
{
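The hunk above only wraps the code-segment mlock into a try/catch. For readers unfamiliar with the technique, here is a minimal, self-contained sketch of the same idea — finding the mapping that contains a given function via /proc/self/maps and pinning it in RAM so it cannot be paged out. It uses only POSIX APIs; `findMappedArea` and `some_function` are illustrative stand-ins, not ClickHouse code.

```cpp
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <sstream>
#include <string>
#include <utility>

/// Scan /proc/self/maps for the entry containing `ptr`; return {start, length}.
/// Hypothetical stand-in for ClickHouse's getMappedArea().
static std::pair<void *, size_t> findMappedArea(void * ptr)
{
    std::ifstream maps("/proc/self/maps");
    std::string line;
    auto target = reinterpret_cast<uintptr_t>(ptr);
    while (std::getline(maps, line))
    {
        uintptr_t begin = 0, end = 0;
        char dash = 0;
        std::istringstream is(line);
        is >> std::hex >> begin >> dash >> end;   // lines start with "begin-end ..."
        if (begin <= target && target < end)
            return {reinterpret_cast<void *>(begin), end - begin};
    }
    return {nullptr, 0};
}

int some_function() { return 42; }

int main()
{
    auto [addr, len] = findMappedArea(reinterpret_cast<void *>(&some_function));
    /// mlock needs CAP_IPC_LOCK or enough RLIMIT_MEMLOCK, hence the error branch.
    if (addr != nullptr && 0 == mlock(addr, len))
        std::printf("Locked %zu bytes of the code segment in RAM\n", len);
    else
        std::perror("mlock");
}
```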
@@ -41,11 +41,6 @@ void FileChecker::setEmpty(const String & full_file_path)
map[fileName(full_file_path)] = 0;
}

FileChecker::Map FileChecker::getFileSizes() const
{
return map;
}

CheckResults FileChecker::check() const
{
// Read the files again every time you call `check` - so as not to violate the constancy.
@@ -27,12 +27,10 @@ public:
/// The purpose of this function is to rollback a group of unfinished writes.
void repair();

private:
/// File name -> size.
using Map = std::map<String, UInt64>;

Map getFileSizes() const;

private:
void initialize();
void updateImpl(const String & file_path);
void load(Map & local_map, const String & path) const;
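The new `getFileSizes()` accessor exposes the per-file sizes that FileChecker records in sizes.json; the TinyLog reader further down uses them to cap each read at the size that was current when the query started, so a reader no longer has to hold the table lock while writers append. A rough sketch of that idea with plain standard-library types (`FileSizes` and `readSnapshot` are illustrative names, not ClickHouse API):

```cpp
#include <cstddef>
#include <fstream>
#include <map>
#include <string>
#include <vector>

using FileSizes = std::map<std::string, size_t>;  // analogous to FileChecker::Map (file name -> size)

/// Read a file, but never past the size recorded in the snapshot taken at query start.
/// Bytes appended after the snapshot are simply invisible to this read, so no lock is needed.
std::vector<char> readSnapshot(const std::string & path, const FileSizes & sizes)
{
    auto it = sizes.find(path);
    size_t limit = (it == sizes.end()) ? 0 : it->second;

    std::ifstream in(path, std::ios::binary);
    std::vector<char> data(limit);
    in.read(data.data(), static_cast<std::streamsize>(limit));
    data.resize(static_cast<size_t>(in.gcount()));  // shrink if the file is shorter than recorded
    return data;
}
```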
@@ -80,7 +80,7 @@ public:
DataTypePtr getReturnType(const ColumnsWithTypeAndName &) const override { return {}; } // Not used

bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return true; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

bool isVariadic() const override { return true; }
size_t getNumberOfArguments() const override { return 0; }
@@ -401,7 +401,7 @@ void ProcessList::killAllQueries()

QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
{
QueryStatusInfo res;
QueryStatusInfo res{};

res.query = query;
res.client_info = client_info;
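The only functional change in this hunk is `QueryStatusInfo res;` becoming `QueryStatusInfo res{};`: value-initialization zeroes members that have no default member initializer, whereas default-initialization leaves them indeterminate. A tiny self-contained illustration (the struct here is made up for the example, not the real QueryStatusInfo):

```cpp
#include <cstdio>

struct Info
{
    int elapsed_ms;       // no default member initializer
    double memory_usage;  // no default member initializer
};

int main()
{
    Info a;    // default-initialized: members hold indeterminate values (reading them is UB)
    Info b{};  // value-initialized: members are zeroed
    std::printf("%d %f\n", b.elapsed_ms, b.memory_usage);  // prints "0 0.000000"
    (void)a;   // intentionally never read
}
```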
@@ -52,7 +52,6 @@ namespace ErrorCodes
extern const int UNKNOWN_IDENTIFIER;
extern const int INCORRECT_FILE_NAME;
extern const int FILE_DOESNT_EXIST;
extern const int TIMEOUT_EXCEEDED;
}

namespace
@@ -200,17 +199,6 @@ StorageFile::StorageFile(CommonArguments args)
setInMemoryMetadata(storage_metadata);
}


static std::chrono::seconds getLockTimeout(const Context & context)
{
const Settings & settings = context.getSettingsRef();
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
lock_timeout = settings.max_execution_time.totalSeconds();
return std::chrono::seconds{lock_timeout};
}

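This `getLockTimeout()` helper, the switch of the table `rwlock` from `std::shared_mutex` to `std::shared_timed_mutex`, and the `if (!lock) throw Exception("Lock timeout exceeded", ...)` checks form the pattern that repeats in every storage below: lock acquisition is bounded by `lock_acquire_timeout` (further capped by `max_execution_time`), and a missed deadline becomes TIMEOUT_EXCEEDED instead of an indefinite wait. A minimal sketch of the same pattern with plain standard-library types (not the ClickHouse classes):

```cpp
#include <chrono>
#include <iostream>
#include <mutex>
#include <shared_mutex>
#include <stdexcept>
#include <thread>

std::shared_timed_mutex rwlock;  // std::shared_mutex has no timed acquisition; shared_timed_mutex does

void read_with_deadline(std::chrono::seconds timeout)
{
    // Passing a duration to the lock constructor calls try_lock_shared_for() under the hood.
    std::shared_lock lock(rwlock, timeout);
    if (!lock)  // owns_lock() == false -> the deadline expired
        throw std::runtime_error("Lock timeout exceeded");
    std::cout << "reading under shared lock\n";
}

int main()
{
    std::unique_lock writer(rwlock);  // simulate a long-running writer holding the exclusive lock
    std::thread reader([]
    {
        try { read_with_deadline(std::chrono::seconds(1)); }
        catch (const std::exception & e) { std::cout << e.what() << '\n'; }
    });
    reader.join();  // the reader gives up after ~1 second instead of blocking forever
}
```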
class StorageFileSource : public SourceWithProgress
{
public:
@@ -257,9 +245,7 @@ public:
{
if (storage->use_table_fd)
{
unique_lock = std::unique_lock(storage->rwlock, getLockTimeout(context));
if (!unique_lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
unique_lock = std::unique_lock(storage->rwlock);

/// We could use common ReadBuffer and WriteBuffer in storage to leverage cache
/// and add ability to seek unseekable files, but cache sync isn't supported.
@@ -278,9 +264,7 @@ public:
}
else
{
shared_lock = std::shared_lock(storage->rwlock, getLockTimeout(context));
if (!shared_lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
shared_lock = std::shared_lock(storage->rwlock);
}
}

@@ -389,8 +373,8 @@ private:

bool finished_generate = false;

std::shared_lock<std::shared_timed_mutex> shared_lock;
std::unique_lock<std::shared_timed_mutex> unique_lock;
std::shared_lock<std::shared_mutex> shared_lock;
std::unique_lock<std::shared_mutex> unique_lock;
};


@@ -433,7 +417,7 @@ Pipe StorageFile::read(

for (size_t i = 0; i < num_streams; ++i)
pipes.emplace_back(std::make_shared<StorageFileSource>(
this_ptr, metadata_snapshot, context, max_block_size, files_info, metadata_snapshot->getColumns().getDefaults()));
this_ptr, metadata_snapshot, context, max_block_size, files_info, metadata_snapshot->getColumns().getDefaults()));

return Pipe::unitePipes(std::move(pipes));
}
@@ -445,16 +429,12 @@ public:
explicit StorageFileBlockOutputStream(
StorageFile & storage_,
const StorageMetadataPtr & metadata_snapshot_,
std::unique_lock<std::shared_timed_mutex> && lock_,
const CompressionMethod compression_method,
const Context & context)
: storage(storage_)
, metadata_snapshot(metadata_snapshot_)
, lock(std::move(lock_))
, lock(storage.rwlock)
{
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

std::unique_ptr<WriteBufferFromFileDescriptor> naked_buffer = nullptr;
if (storage.use_table_fd)
{
@@ -508,7 +488,7 @@ public:
private:
StorageFile & storage;
StorageMetadataPtr metadata_snapshot;
std::unique_lock<std::shared_timed_mutex> lock;
std::unique_lock<std::shared_mutex> lock;
std::unique_ptr<WriteBuffer> write_buf;
BlockOutputStreamPtr writer;
bool prefix_written{false};
@@ -526,7 +506,7 @@ BlockOutputStreamPtr StorageFile::write(
if (!paths.empty())
path = paths[0];

return std::make_shared<StorageFileBlockOutputStream>(*this, metadata_snapshot, std::unique_lock{rwlock, getLockTimeout(context)},
return std::make_shared<StorageFileBlockOutputStream>(*this, metadata_snapshot,
chooseCompressionMethod(path, compression_method), context);
}

@@ -549,6 +529,8 @@ void StorageFile::rename(const String & new_path_to_table_data, const StorageID
if (path_new == paths[0])
return;

std::unique_lock<std::shared_mutex> lock(rwlock);

Poco::File(Poco::Path(path_new).parent()).createDirectories();
Poco::File(paths[0]).renameTo(path_new);

@@ -565,6 +547,8 @@ void StorageFile::truncate(
if (paths.size() != 1)
throw Exception("Can't truncate table '" + getStorageID().getNameForLogs() + "' in readonly mode", ErrorCodes::DATABASE_ACCESS_DENIED);

std::unique_lock<std::shared_mutex> lock(rwlock);

if (use_table_fd)
{
if (0 != ::ftruncate(table_fd, 0))
@@ -89,7 +89,7 @@ private:
std::atomic<bool> table_fd_was_used{false}; /// To detect repeating reads from stdin
off_t table_fd_init_offset = -1; /// Initial position of fd, used for repeating reads

mutable std::shared_timed_mutex rwlock;
mutable std::shared_mutex rwlock;

Poco::Logger * log = &Poco::Logger::get("StorageFile");
};
@@ -39,7 +39,6 @@ namespace DB

namespace ErrorCodes
{
extern const int TIMEOUT_EXCEEDED;
extern const int LOGICAL_ERROR;
extern const int DUPLICATE_COLUMN;
extern const int SIZES_OF_MARKS_FILES_ARE_INCONSISTENT;
@@ -51,6 +50,7 @@ namespace ErrorCodes
class LogSource final : public SourceWithProgress
{
public:

static Block getHeader(const NamesAndTypesList & columns)
{
Block res;
@@ -116,16 +116,13 @@ private:
class LogBlockOutputStream final : public IBlockOutputStream
{
public:
explicit LogBlockOutputStream(
StorageLog & storage_, const StorageMetadataPtr & metadata_snapshot_, std::unique_lock<std::shared_timed_mutex> && lock_)
explicit LogBlockOutputStream(StorageLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
: storage(storage_)
, metadata_snapshot(metadata_snapshot_)
, lock(std::move(lock_))
, lock(storage.rwlock)
, marks_stream(
storage.disk->writeFile(storage.marks_file_path, 4096, WriteMode::Rewrite))
{
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
}

~LogBlockOutputStream() override
@@ -152,7 +149,7 @@ public:
private:
StorageLog & storage;
StorageMetadataPtr metadata_snapshot;
std::unique_lock<std::shared_timed_mutex> lock;
std::unique_lock<std::shared_mutex> lock;
bool done = false;

struct Stream
@@ -510,11 +507,9 @@ void StorageLog::addFiles(const String & column_name, const IDataType & type)
}


void StorageLog::loadMarks(std::chrono::seconds lock_timeout)
void StorageLog::loadMarks()
{
std::unique_lock lock(rwlock, lock_timeout);
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
std::unique_lock<std::shared_mutex> lock(rwlock);

if (loaded_marks)
return;
@@ -557,6 +552,8 @@ void StorageLog::rename(const String & new_path_to_table_data, const StorageID &
{
assert(table_path != new_path_to_table_data);
{
std::unique_lock<std::shared_mutex> lock(rwlock);

disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
@@ -572,6 +569,8 @@ void StorageLog::rename(const String & new_path_to_table_data, const StorageID &

void StorageLog::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &)
{
std::shared_lock<std::shared_mutex> lock(rwlock);

files.clear();
file_count = 0;
loaded_marks = false;
@@ -611,17 +610,6 @@ const StorageLog::Marks & StorageLog::getMarksWithRealRowCount(const StorageMeta
return it->second.marks;
}


static std::chrono::seconds getLockTimeout(const Context & context)
{
const Settings & settings = context.getSettingsRef();
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
lock_timeout = settings.max_execution_time.totalSeconds();
return std::chrono::seconds{lock_timeout};
}


Pipe StorageLog::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
@@ -632,15 +620,11 @@ Pipe StorageLog::read(
unsigned num_streams)
{
metadata_snapshot->check(column_names, getVirtuals(), getStorageID());

auto lock_timeout = getLockTimeout(context);
loadMarks(lock_timeout);
loadMarks();

NamesAndTypesList all_columns = Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names));

std::shared_lock lock(rwlock, lock_timeout);
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
std::shared_lock<std::shared_mutex> lock(rwlock);

Pipes pipes;

@@ -669,28 +653,18 @@ Pipe StorageLog::read(
max_read_buffer_size));
}

/// No need to hold lock while reading because we read fixed range of data that does not change while appending more data.
return Pipe::unitePipes(std::move(pipes));
}

BlockOutputStreamPtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
BlockOutputStreamPtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
{
auto lock_timeout = getLockTimeout(context);
loadMarks(lock_timeout);

std::unique_lock lock(rwlock, lock_timeout);
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

return std::make_shared<LogBlockOutputStream>(*this, metadata_snapshot, std::move(lock));
loadMarks();
return std::make_shared<LogBlockOutputStream>(*this, metadata_snapshot);
}

CheckResults StorageLog::checkData(const ASTPtr & /* query */, const Context & context)
CheckResults StorageLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
std::shared_lock lock(rwlock, getLockTimeout(context));
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

std::shared_lock<std::shared_mutex> lock(rwlock);
return file_checker.check();
}

@@ -83,7 +83,7 @@ private:
DiskPtr disk;
String table_path;

mutable std::shared_timed_mutex rwlock;
mutable std::shared_mutex rwlock;

Files files;

@@ -104,7 +104,7 @@ private:
/// Read marks files if they are not already read.
/// It is done lazily, so that with a large number of tables, the server starts quickly.
/// You can not call with a write locked `rwlock`.
void loadMarks(std::chrono::seconds lock_timeout);
void loadMarks();

/** For normal columns, the number of rows in the block is specified in the marks.
* For array columns and nested structures, there are more than one group of marks that correspond to different files
@@ -47,13 +47,13 @@ namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int INCORRECT_FILE_NAME;
extern const int TIMEOUT_EXCEEDED;
}


class StripeLogSource final : public SourceWithProgress
{
public:

static Block getHeader(
StorageStripeLog & storage,
const StorageMetadataPtr & metadata_snapshot,
@@ -157,11 +157,10 @@ private:
class StripeLogBlockOutputStream final : public IBlockOutputStream
{
public:
explicit StripeLogBlockOutputStream(
StorageStripeLog & storage_, const StorageMetadataPtr & metadata_snapshot_, std::unique_lock<std::shared_timed_mutex> && lock_)
explicit StripeLogBlockOutputStream(StorageStripeLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
: storage(storage_)
, metadata_snapshot(metadata_snapshot_)
, lock(std::move(lock_))
, lock(storage.rwlock)
, data_out_file(storage.table_path + "data.bin")
, data_out_compressed(storage.disk->writeFile(data_out_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Append))
, data_out(std::make_unique<CompressedWriteBuffer>(
@@ -171,8 +170,6 @@ public:
, index_out(std::make_unique<CompressedWriteBuffer>(*index_out_compressed))
, block_out(*data_out, 0, metadata_snapshot->getSampleBlock(), false, index_out.get(), storage.disk->getFileSize(data_out_file))
{
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
}

~StripeLogBlockOutputStream() override
@@ -226,7 +223,7 @@ public:
private:
StorageStripeLog & storage;
StorageMetadataPtr metadata_snapshot;
std::unique_lock<std::shared_timed_mutex> lock;
std::unique_lock<std::shared_mutex> lock;

String data_out_file;
std::unique_ptr<WriteBuffer> data_out_compressed;
@@ -289,6 +286,8 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Stora
{
assert(table_path != new_path_to_table_data);
{
std::unique_lock<std::shared_mutex> lock(rwlock);

disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
@@ -298,16 +297,6 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Stora
}


static std::chrono::seconds getLockTimeout(const Context & context)
{
const Settings & settings = context.getSettingsRef();
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
lock_timeout = settings.max_execution_time.totalSeconds();
return std::chrono::seconds{lock_timeout};
}


Pipe StorageStripeLog::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
@@ -317,9 +306,7 @@ Pipe StorageStripeLog::read(
const size_t /*max_block_size*/,
unsigned num_streams)
{
std::shared_lock lock(rwlock, getLockTimeout(context));
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
std::shared_lock<std::shared_mutex> lock(rwlock);

metadata_snapshot->check(column_names, getVirtuals(), getStorageID());

@@ -358,28 +345,24 @@ Pipe StorageStripeLog::read(
}


BlockOutputStreamPtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
BlockOutputStreamPtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
{
std::unique_lock lock(rwlock, getLockTimeout(context));
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

return std::make_shared<StripeLogBlockOutputStream>(*this, metadata_snapshot, std::move(lock));
return std::make_shared<StripeLogBlockOutputStream>(*this, metadata_snapshot);
}


CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Context & context)
CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
std::shared_lock lock(rwlock, getLockTimeout(context));
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

std::shared_lock<std::shared_mutex> lock(rwlock);
return file_checker.check();
}

void StorageStripeLog::truncate(const ASTPtr &, const StorageMetadataPtr &, const Context &, TableExclusiveLockHolder &)
{
std::shared_lock<std::shared_mutex> lock(rwlock);

disk->clearDirectory(table_path);

file_checker = FileChecker{disk, table_path + "sizes.json"};
}

@@ -67,7 +67,7 @@ private:
size_t max_compress_block_size;

FileChecker file_checker;
mutable std::shared_timed_mutex rwlock;
mutable std::shared_mutex rwlock;

Poco::Logger * log;
};
@@ -13,7 +13,6 @@

#include <IO/ReadBufferFromFileBase.h>
#include <IO/WriteBufferFromFileBase.h>
#include <IO/LimitReadBuffer.h>
#include <Compression/CompressionFactory.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
@@ -47,7 +46,6 @@ namespace DB

namespace ErrorCodes
{
extern const int TIMEOUT_EXCEEDED;
extern const int DUPLICATE_COLUMN;
extern const int INCORRECT_FILE_NAME;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
@@ -57,6 +55,7 @@ namespace ErrorCodes
class TinyLogSource final : public SourceWithProgress
{
public:

static Block getHeader(const NamesAndTypesList & columns)
{
Block res;
@@ -67,17 +66,10 @@ public:
return Nested::flatten(res);
}

TinyLogSource(
size_t block_size_,
const NamesAndTypesList & columns_,
StorageTinyLog & storage_,
size_t max_read_buffer_size_,
FileChecker::Map file_sizes_)
TinyLogSource(size_t block_size_, const NamesAndTypesList & columns_, StorageTinyLog & storage_, size_t max_read_buffer_size_)
: SourceWithProgress(getHeader(columns_))
, block_size(block_size_), columns(columns_), storage(storage_)
, max_read_buffer_size(max_read_buffer_size_), file_sizes(std::move(file_sizes_))
{
}
, block_size(block_size_), columns(columns_), storage(storage_), lock(storage_.rwlock)
, max_read_buffer_size(max_read_buffer_size_) {}

String getName() const override { return "TinyLog"; }

@@ -88,21 +80,19 @@ private:
size_t block_size;
NamesAndTypesList columns;
StorageTinyLog & storage;
std::shared_lock<std::shared_mutex> lock;
bool is_finished = false;
size_t max_read_buffer_size;
FileChecker::Map file_sizes;

struct Stream
{
Stream(const DiskPtr & disk, const String & data_path, size_t max_read_buffer_size_, size_t file_size)
Stream(const DiskPtr & disk, const String & data_path, size_t max_read_buffer_size_)
: plain(disk->readFile(data_path, std::min(max_read_buffer_size_, disk->getFileSize(data_path)))),
limited(std::make_unique<LimitReadBuffer>(*plain, file_size, false)),
compressed(*plain)
{
}

std::unique_ptr<ReadBuffer> plain;
std::unique_ptr<ReadBuffer> limited;
CompressedReadBuffer compressed;
};

@@ -120,14 +110,9 @@ private:
class TinyLogBlockOutputStream final : public IBlockOutputStream
{
public:
explicit TinyLogBlockOutputStream(
StorageTinyLog & storage_,
const StorageMetadataPtr & metadata_snapshot_,
std::unique_lock<std::shared_timed_mutex> && lock_)
: storage(storage_), metadata_snapshot(metadata_snapshot_), lock(std::move(lock_))
explicit TinyLogBlockOutputStream(StorageTinyLog & storage_, const StorageMetadataPtr & metadata_snapshot_)
: storage(storage_), metadata_snapshot(metadata_snapshot_), lock(storage_.rwlock)
{
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);
}

~TinyLogBlockOutputStream() override
@@ -155,7 +140,7 @@ public:
private:
StorageTinyLog & storage;
StorageMetadataPtr metadata_snapshot;
std::unique_lock<std::shared_timed_mutex> lock;
std::unique_lock<std::shared_mutex> lock;
bool done = false;

struct Stream
@@ -246,17 +231,13 @@ void TinyLogSource::readData(const String & name, const IDataType & type, IColum
String stream_name = IDataType::getFileNameForStream(name, path);

if (!streams.count(stream_name))
{
String file_path = storage.files[stream_name].data_file_path;
streams[stream_name] = std::make_unique<Stream>(
storage.disk, file_path, max_read_buffer_size, file_sizes[fileName(file_path)]);
}
streams[stream_name] = std::make_unique<Stream>(storage.disk, storage.files[stream_name].data_file_path, max_read_buffer_size);

return &streams[stream_name]->compressed;
};

if (deserialize_states.count(name) == 0)
type.deserializeBinaryBulkStatePrefix(settings, deserialize_states[name]);
type.deserializeBinaryBulkStatePrefix(settings, deserialize_states[name]);

type.deserializeBinaryBulkWithMultipleStreams(column, limit, settings, deserialize_states[name]);
}
@@ -429,6 +410,8 @@ void StorageTinyLog::rename(const String & new_path_to_table_data, const Storage
{
assert(table_path != new_path_to_table_data);
{
std::unique_lock<std::shared_mutex> lock(rwlock);

disk->moveDirectory(table_path, new_path_to_table_data);

table_path = new_path_to_table_data;
@@ -441,16 +424,6 @@ void StorageTinyLog::rename(const String & new_path_to_table_data, const Storage
}


static std::chrono::seconds getLockTimeout(const Context & context)
{
const Settings & settings = context.getSettingsRef();
Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds();
if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout)
lock_timeout = settings.max_execution_time.totalSeconds();
return std::chrono::seconds{lock_timeout};
}


Pipe StorageTinyLog::read(
const Names & column_names,
const StorageMetadataPtr & metadata_snapshot,
@@ -464,40 +437,28 @@ Pipe StorageTinyLog::read(

// When reading, we lock the entire storage, because we only have one file
// per column and can't modify it concurrently.
const Settings & settings = context.getSettingsRef();

std::shared_lock lock{rwlock, getLockTimeout(context)};
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

/// No need to hold lock while reading because we read fixed range of data that does not change while appending more data.
return Pipe(std::make_shared<TinyLogSource>(
max_block_size,
Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names)),
*this,
settings.max_read_buffer_size,
file_checker.getFileSizes()));
max_block_size, Nested::collect(metadata_snapshot->getColumns().getAllPhysical().addTypes(column_names)), *this, context.getSettingsRef().max_read_buffer_size));
}


BlockOutputStreamPtr StorageTinyLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & context)
BlockOutputStreamPtr StorageTinyLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, const Context & /*context*/)
{
return std::make_shared<TinyLogBlockOutputStream>(*this, metadata_snapshot, std::unique_lock{rwlock, getLockTimeout(context)});
return std::make_shared<TinyLogBlockOutputStream>(*this, metadata_snapshot);
}


CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context & context)
CheckResults StorageTinyLog::checkData(const ASTPtr & /* query */, const Context & /* context */)
{
std::shared_lock lock(rwlock, getLockTimeout(context));
if (!lock)
throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED);

std::shared_lock<std::shared_mutex> lock(rwlock);
return file_checker.check();
}

void StorageTinyLog::truncate(
const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &)
{
std::unique_lock<std::shared_mutex> lock(rwlock);

disk->clearDirectory(table_path);

files.clear();
@@ -507,6 +468,14 @@ void StorageTinyLog::truncate(
addFiles(column.name, *column.type);
}

void StorageTinyLog::drop()
{
std::unique_lock<std::shared_mutex> lock(rwlock);
if (disk->exists(table_path))
disk->removeRecursive(table_path);
files.clear();
}


void registerStorageTinyLog(StorageFactory & factory)
{
@@ -43,6 +43,8 @@ public:

void truncate(const ASTPtr &, const StorageMetadataPtr & metadata_snapshot, const Context &, TableExclusiveLockHolder &) override;

void drop() override;

protected:
StorageTinyLog(
DiskPtr disk_,
@@ -68,7 +70,7 @@ private:
Files files;

FileChecker file_checker;
mutable std::shared_timed_mutex rwlock;
mutable std::shared_mutex rwlock;

Poco::Logger * log;
@@ -486,8 +486,8 @@ class ClickHouseCluster:
        start = time.time()
        while time.time() - start < timeout:
            try:
                connection.database_names()
                print "Connected to Mongo dbs:", connection.database_names()
                connection.list_database_names()
                print "Connected to Mongo dbs:", connection.list_database_names()
                return
            except Exception as ex:
                print "Can't connect to Mongo " + str(ex)
@@ -333,16 +333,16 @@ class _SourceExecutableBase(ExternalSource):
                             user='root')


class SourceExecutableCache(_SourceExecutableBase):
class SourceExecutableHashed(_SourceExecutableBase):

    def _get_cmd(self, path):
        return "cat {}".format(path)

    def compatible_with_layout(self, layout):
        return 'cache' not in layout.name
        return 'hashed' in layout.name


class SourceExecutableHashed(_SourceExecutableBase):
class SourceExecutableCache(_SourceExecutableBase):

    def _get_cmd(self, path):
        return "cat - >/dev/null;cat {}".format(path)
@@ -1,346 +0,0 @@
import math
import os

import pytest
from helpers.cluster import ClickHouseCluster
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
from helpers.external_sources import SourceMongo, SourceMongoURI, SourceHTTP, SourceHTTPS, SourceCassandra
from helpers.external_sources import SourceMySQL, SourceClickHouse, SourceFile, SourceExecutableCache, \
    SourceExecutableHashed

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries')

FIELDS = {
    "simple": [
        Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999),
        Field("UInt8_", 'UInt8', default_value_for_get=55),
        Field("UInt16_", 'UInt16', default_value_for_get=66),
        Field("UInt32_", 'UInt32', default_value_for_get=77),
        Field("UInt64_", 'UInt64', default_value_for_get=88),
        Field("Int8_", 'Int8', default_value_for_get=-55),
        Field("Int16_", 'Int16', default_value_for_get=-66),
        Field("Int32_", 'Int32', default_value_for_get=-77),
        Field("Int64_", 'Int64', default_value_for_get=-88),
        Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
        Field("Date_", 'Date', default_value_for_get='2018-12-30'),
        Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
        Field("String_", 'String', default_value_for_get='hi'),
        Field("Float32_", 'Float32', default_value_for_get=555.11),
        Field("Float64_", 'Float64', default_value_for_get=777.11),
        Field("ParentKeyField", "UInt64", default_value_for_get=444, hierarchical=True)
    ],
    "complex": [
        Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999),
        Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx'),
        Field("UInt8_", 'UInt8', default_value_for_get=55),
        Field("UInt16_", 'UInt16', default_value_for_get=66),
        Field("UInt32_", 'UInt32', default_value_for_get=77),
        Field("UInt64_", 'UInt64', default_value_for_get=88),
        Field("Int8_", 'Int8', default_value_for_get=-55),
        Field("Int16_", 'Int16', default_value_for_get=-66),
        Field("Int32_", 'Int32', default_value_for_get=-77),
        Field("Int64_", 'Int64', default_value_for_get=-88),
        Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
        Field("Date_", 'Date', default_value_for_get='2018-12-30'),
        Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
        Field("String_", 'String', default_value_for_get='hi'),
        Field("Float32_", 'Float32', default_value_for_get=555.11),
        Field("Float64_", 'Float64', default_value_for_get=777.11),
    ],
    "ranged": [
        Field("KeyField1", 'UInt64', is_key=True),
        Field("KeyField2", 'Date', is_range_key=True),
        Field("StartDate", 'Date', range_hash_type='min'),
        Field("EndDate", 'Date', range_hash_type='max'),
        Field("UInt8_", 'UInt8', default_value_for_get=55),
        Field("UInt16_", 'UInt16', default_value_for_get=66),
        Field("UInt32_", 'UInt32', default_value_for_get=77),
        Field("UInt64_", 'UInt64', default_value_for_get=88),
        Field("Int8_", 'Int8', default_value_for_get=-55),
        Field("Int16_", 'Int16', default_value_for_get=-66),
        Field("Int32_", 'Int32', default_value_for_get=-77),
        Field("Int64_", 'Int64', default_value_for_get=-88),
        Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
        Field("Date_", 'Date', default_value_for_get='2018-12-30'),
        Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
        Field("String_", 'String', default_value_for_get='hi'),
        Field("Float32_", 'Float32', default_value_for_get=555.11),
        Field("Float64_", 'Float64', default_value_for_get=777.11),
    ]
}

VALUES = {
    "simple": [
        [1, 22, 333, 4444, 55555, -6, -77,
         -888, -999, '550e8400-e29b-41d4-a716-446655440003',
         '1973-06-28', '1985-02-28 23:43:25', 'hello', 22.543, 3332154213.4, 0],
        [2, 3, 4, 5, 6, -7, -8,
         -9, -10, '550e8400-e29b-41d4-a716-446655440002',
         '1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3222154213.4, 1]
    ],
    "complex": [
        [1, 'world', 22, 333, 4444, 55555, -6,
         -77, -888, -999, '550e8400-e29b-41d4-a716-446655440003',
         '1973-06-28', '1985-02-28 23:43:25',
         'hello', 22.543, 3332154213.4],
        [2, 'qwerty2', 52, 2345, 6544, 9191991, -2,
         -717, -81818, -92929, '550e8400-e29b-41d4-a716-446655440007',
         '1975-09-28', '2000-02-28 23:33:24',
         'my', 255.543, 3332221.44]

    ],
    "ranged": [
        [1, '2019-02-10', '2019-02-01', '2019-02-28',
         22, 333, 4444, 55555, -6, -77, -888, -999,
         '550e8400-e29b-41d4-a716-446655440003',
         '1973-06-28', '1985-02-28 23:43:25', 'hello',
         22.543, 3332154213.4],
        [2, '2019-04-10', '2019-04-01', '2019-04-28',
         11, 3223, 41444, 52515, -65, -747, -8388, -9099,
         '550e8400-e29b-41d4-a716-446655440004',
         '1973-06-29', '2002-02-28 23:23:25', '!!!!',
         32.543, 3332543.4]
    ]
}

LAYOUTS = [
    Layout("flat"),
    Layout("hashed"),
    Layout("cache"),
    Layout("complex_key_hashed"),
    Layout("complex_key_cache"),
    Layout("range_hashed"),
    Layout("direct"),
    Layout("complex_key_direct")
]

SOURCES = [
    SourceCassandra("Cassandra", "localhost", "9043", "cassandra1", "9042", "", ""),
    SourceMongo("MongoDB", "localhost", "27018", "mongo1", "27017", "root", "clickhouse"),
    SourceMongoURI("MongoDB_URI", "localhost", "27018", "mongo1", "27017", "root", "clickhouse"),
    SourceMySQL("MySQL", "localhost", "3308", "mysql1", "3306", "root", "clickhouse"),
    SourceClickHouse("RemoteClickHouse", "localhost", "9000", "clickhouse1", "9000", "default", ""),
    SourceClickHouse("LocalClickHouse", "localhost", "9000", "node", "9000", "default", ""),
    SourceFile("File", "localhost", "9000", "node", "9000", "", ""),
    SourceExecutableHashed("ExecutableHashed", "localhost", "9000", "node", "9000", "", ""),
    SourceExecutableCache("ExecutableCache", "localhost", "9000", "node", "9000", "", ""),
    SourceHTTP("SourceHTTP", "localhost", "9000", "clickhouse1", "9000", "", ""),
    SourceHTTPS("SourceHTTPS", "localhost", "9000", "clickhouse1", "9000", "", ""),
]

DICTIONARIES = []

cluster = None
node = None


def get_dict(source, layout, fields, suffix_name=''):
    global dict_configs_path

    structure = DictionaryStructure(layout, fields)
    dict_name = source.name + "_" + layout.name + '_' + suffix_name
    dict_path = os.path.join(dict_configs_path, dict_name + '.xml')
    dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields)
    dictionary.generate_config()
    return dictionary


def setup_module(module):
    global DICTIONARIES
    global cluster
    global node
    global dict_configs_path

    for f in os.listdir(dict_configs_path):
        os.remove(os.path.join(dict_configs_path, f))

    for layout in LAYOUTS:
        for source in SOURCES:
            if source.compatible_with_layout(layout):
                DICTIONARIES.append(get_dict(source, layout, FIELDS[layout.layout_type]))
            else:
                print "Source", source.name, "incompatible with layout", layout.name

    cluster = ClickHouseCluster(__file__)

    main_configs = []
    main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))

    cluster.add_instance('clickhouse1', main_configs=main_configs)

    dictionaries = []
    for fname in os.listdir(dict_configs_path):
        dictionaries.append(os.path.join(dict_configs_path, fname))

    node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mysql=True,
                                with_mongo=True, with_redis=True, with_cassandra=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        for dictionary in DICTIONARIES:
            print "Preparing", dictionary.name
            dictionary.prepare_source(cluster)
            print "Prepared"

        yield cluster

    finally:
        cluster.shutdown()


def get_dictionaries(fold, total_folds, all_dicts):
    chunk_len = int(math.ceil(len(all_dicts) / float(total_folds)))
    if chunk_len * fold >= len(all_dicts):
        return []
    return all_dicts[fold * chunk_len: (fold + 1) * chunk_len]


def remove_mysql_dicts():
    """
    We have false-positive race condition in our openSSL version.
    MySQL dictionary use OpenSSL, so to prevent known failure we
    disable tests for these dictionaries.

    Read of size 8 at 0x7b3c00005dd0 by thread T61 (mutexes: write M1010349240585225536):
    #0 EVP_CIPHER_mode <null> (clickhouse+0x13b2223b)
    #1 do_ssl3_write <null> (clickhouse+0x13a137bc)
    #2 ssl3_write_bytes <null> (clickhouse+0x13a12387)
    #3 ssl3_write <null> (clickhouse+0x139db0e6)
    #4 ssl_write_internal <null> (clickhouse+0x139eddce)
    #5 SSL_write <null> (clickhouse+0x139edf20)
    #6 ma_tls_write <null> (clickhouse+0x139c7557)
    #7 ma_pvio_tls_write <null> (clickhouse+0x139a8f59)
    #8 ma_pvio_write <null> (clickhouse+0x139a8488)
    #9 ma_net_real_write <null> (clickhouse+0x139a4e2c)
    #10 ma_net_write_command <null> (clickhouse+0x139a546d)
    #11 mthd_my_send_cmd <null> (clickhouse+0x13992546)
    #12 mysql_close_slow_part <null> (clickhouse+0x13999afd)
    #13 mysql_close <null> (clickhouse+0x13999071)
    #14 mysqlxx::Connection::~Connection() <null> (clickhouse+0x1370f814)
    #15 mysqlxx::Pool::~Pool() <null> (clickhouse+0x13715a7b)

    TODO remove this when open ssl will be fixed or thread sanitizer will be suppressed
    """

    # global DICTIONARIES
    # DICTIONARIES = [d for d in DICTIONARIES if not d.name.startswith("MySQL")]


@pytest.mark.parametrize("fold", list(range(10)))
def test_simple_dictionaries(started_cluster, fold):
    if node.is_built_with_thread_sanitizer():
        remove_mysql_dicts()

    fields = FIELDS["simple"]
    values = VALUES["simple"]
    data = [Row(fields, vals) for vals in values]

    all_simple_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "simple"]
    simple_dicts = get_dictionaries(fold, 10, all_simple_dicts)

    print "Length of dicts:", len(simple_dicts)
    for dct in simple_dicts:
        dct.load_data(data)

    node.query("system reload dictionaries")

    queries_with_answers = []
    for dct in simple_dicts:
        for row in data:
            for field in fields:
                if not field.is_key:
                    for query in dct.get_select_get_queries(field, row):
                        queries_with_answers.append((query, row.get_value_by_name(field.name)))

                    for query in dct.get_select_has_queries(field, row):
                        queries_with_answers.append((query, 1))

                    for query in dct.get_select_get_or_default_queries(field, row):
                        queries_with_answers.append((query, field.default_value_for_get))
        for query in dct.get_hierarchical_queries(data[0]):
            queries_with_answers.append((query, [1]))

        for query in dct.get_hierarchical_queries(data[1]):
            queries_with_answers.append((query, [2, 1]))

        for query in dct.get_is_in_queries(data[0], data[1]):
            queries_with_answers.append((query, 0))

        for query in dct.get_is_in_queries(data[1], data[0]):
            queries_with_answers.append((query, 1))

    for query, answer in queries_with_answers:
        print query
        if isinstance(answer, list):
            answer = str(answer).replace(' ', '')
        assert node.query(query) == str(answer) + '\n'


@pytest.mark.parametrize("fold", list(range(10)))
def test_complex_dictionaries(started_cluster, fold):
    if node.is_built_with_thread_sanitizer():
        remove_mysql_dicts()

    fields = FIELDS["complex"]
    values = VALUES["complex"]
    data = [Row(fields, vals) for vals in values]

    all_complex_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "complex"]
    complex_dicts = get_dictionaries(fold, 10, all_complex_dicts)

    for dct in complex_dicts:
        dct.load_data(data)

    node.query("system reload dictionaries")

    queries_with_answers = []
    for dct in complex_dicts:
        for row in data:
            for field in fields:
                if not field.is_key:
                    for query in dct.get_select_get_queries(field, row):
                        queries_with_answers.append((query, row.get_value_by_name(field.name)))

                    for query in dct.get_select_has_queries(field, row):
                        queries_with_answers.append((query, 1))

                    for query in dct.get_select_get_or_default_queries(field, row):
                        queries_with_answers.append((query, field.default_value_for_get))

    for query, answer in queries_with_answers:
        print query
        assert node.query(query) == str(answer) + '\n'


@pytest.mark.parametrize("fold", list(range(10)))
def test_ranged_dictionaries(started_cluster, fold):
    if node.is_built_with_thread_sanitizer():
        remove_mysql_dicts()

    fields = FIELDS["ranged"]
    values = VALUES["ranged"]
    data = [Row(fields, vals) for vals in values]

    all_ranged_dicts = [d for d in DICTIONARIES if d.structure.layout.layout_type == "ranged"]
    ranged_dicts = get_dictionaries(fold, 10, all_ranged_dicts)

    for dct in ranged_dicts:
        dct.load_data(data)

    node.query("system reload dictionaries")

    queries_with_answers = []
    for dct in ranged_dicts:
        for row in data:
            for field in fields:
                if not field.is_key and not field.is_range:
                    for query in dct.get_select_get_queries(field, row):
                        queries_with_answers.append((query, row.get_value_by_name(field.name)))

    for query, answer in queries_with_answers:
        print query
        assert node.query(query) == str(answer) + '\n'
@ -0,0 +1,239 @@
|
||||
import os
|
||||
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
|
||||
KEY_FIELDS = {
|
||||
"simple": [
|
||||
Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999)
|
||||
],
|
||||
"complex": [
|
||||
Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999),
|
||||
Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx')
|
||||
],
|
||||
"ranged": [
|
||||
Field("KeyField1", 'UInt64', is_key=True),
|
||||
Field("KeyField2", 'Date', is_range_key=True)
|
||||
]
|
||||
}
|
||||
|
||||
START_FIELDS = {
|
||||
"simple": [],
|
||||
"complex": [],
|
||||
"ranged" : [
|
||||
Field("StartDate", 'Date', range_hash_type='min'),
|
||||
Field("EndDate", 'Date', range_hash_type='max')
|
||||
]
|
||||
}
|
||||
|
||||
MIDDLE_FIELDS = [
|
||||
Field("UInt8_", 'UInt8', default_value_for_get=55),
|
||||
Field("UInt16_", 'UInt16', default_value_for_get=66),
|
||||
Field("UInt32_", 'UInt32', default_value_for_get=77),
|
||||
Field("UInt64_", 'UInt64', default_value_for_get=88),
|
||||
Field("Int8_", 'Int8', default_value_for_get=-55),
|
||||
Field("Int16_", 'Int16', default_value_for_get=-66),
|
||||
Field("Int32_", 'Int32', default_value_for_get=-77),
|
||||
Field("Int64_", 'Int64', default_value_for_get=-88),
|
||||
Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'),
|
||||
Field("Date_", 'Date', default_value_for_get='2018-12-30'),
|
||||
Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'),
|
||||
Field("String_", 'String', default_value_for_get='hi'),
|
||||
Field("Float32_", 'Float32', default_value_for_get=555.11),
|
||||
Field("Float64_", 'Float64', default_value_for_get=777.11),
|
||||
]
|
||||
|
||||
END_FIELDS = {
|
||||
"simple" : [
|
||||
Field("ParentKeyField", "UInt64", default_value_for_get=444, hierarchical=True)
|
||||
],
|
||||
"complex" : [],
|
||||
"ranged" : []
|
||||
}
|
||||
|
||||
LAYOUTS_SIMPLE = ["flat", "hashed", "cache", "direct"]
|
||||
LAYOUTS_COMPLEX = ["complex_key_hashed", "complex_key_cache", "complex_key_direct"]
|
||||
LAYOUTS_RANGED = ["range_hashed"]
|
||||
|
||||
VALUES = {
|
||||
"simple": [
|
||||
[1, 22, 333, 4444, 55555, -6, -77,
|
||||
-888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello', 22.543, 3332154213.4, 0],
|
||||
[2, 3, 4, 5, 6, -7, -8,
|
||||
-9, -10, '550e8400-e29b-41d4-a716-446655440002',
|
||||
'1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3222154213.4, 1]
|
||||
],
|
||||
"complex": [
|
||||
[1, 'world', 22, 333, 4444, 55555, -6,
|
||||
-77, -888, -999, '550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25',
|
||||
'hello', 22.543, 3332154213.4],
|
||||
[2, 'qwerty2', 52, 2345, 6544, 9191991, -2,
|
||||
-717, -81818, -92929, '550e8400-e29b-41d4-a716-446655440007',
|
||||
'1975-09-28', '2000-02-28 23:33:24',
|
||||
'my', 255.543, 3332221.44]
|
||||
],
|
||||
"ranged": [
|
||||
[1, '2019-02-10', '2019-02-01', '2019-02-28',
|
||||
22, 333, 4444, 55555, -6, -77, -888, -999,
|
||||
'550e8400-e29b-41d4-a716-446655440003',
|
||||
'1973-06-28', '1985-02-28 23:43:25', 'hello',
|
||||
22.543, 3332154213.4],
|
||||
[2, '2019-04-10', '2019-04-01', '2019-04-28',
|
||||
11, 3223, 41444, 52515, -65, -747, -8388, -9099,
|
||||
'550e8400-e29b-41d4-a716-446655440004',
|
||||
'1973-06-29', '2002-02-28 23:23:25', '!!!!',
|
||||
32.543, 3332543.4]
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
DICT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
|
||||
|
||||
def get_dict(source, layout, fields, suffix_name=''):
|
||||
global DICT_CONFIG_PATH
|
||||
structure = DictionaryStructure(layout, fields)
|
||||
dict_name = source.name + "_" + layout.name + '_' + suffix_name
|
||||
dict_path = os.path.join(DICT_CONFIG_PATH, dict_name + '.xml')
|
||||
dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields)
|
||||
dictionary.generate_config()
|
||||
return dictionary
|
||||
|
||||
class SimpleLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["simple"] + START_FIELDS["simple"] + MIDDLE_FIELDS + END_FIELDS["simple"]
|
||||
self.values = VALUES["simple"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_SIMPLE:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if not self.layout_to_dictionary.has_key(layout_name):
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
queries_with_answers = []
|
||||
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
|
||||
for query in dct.get_hierarchical_queries(self.data[0]):
|
||||
queries_with_answers.append((query, [1]))
|
||||
|
||||
for query in dct.get_hierarchical_queries(self.data[1]):
|
||||
queries_with_answers.append((query, [2, 1]))
|
||||
|
||||
for query in dct.get_is_in_queries(self.data[0], self.data[1]):
|
||||
queries_with_answers.append((query, 0))
|
||||
|
||||
for query in dct.get_is_in_queries(self.data[1], self.data[0]):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print(query)
|
||||
if isinstance(answer, list):
|
||||
answer = str(answer).replace(' ', '')
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
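# Covers the complex-key layouts: get/has/get-or-default selects for every non-key field.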
class ComplexLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["complex"] + START_FIELDS["complex"] + MIDDLE_FIELDS + END_FIELDS["complex"]
|
||||
self.values = VALUES["complex"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_COMPLEX:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
if layout_name not in self.layout_to_dictionary:
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
queries_with_answers = []
|
||||
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query in dct.get_select_has_queries(field, row):
|
||||
queries_with_answers.append((query, 1))
|
||||
|
||||
for query in dct.get_select_get_or_default_queries(field, row):
|
||||
queries_with_answers.append((query, field.default_value_for_get))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print(query)
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
||||
|
||||
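# Covers the range_hashed layout: get selects for every field that is neither a key nor a range boundary.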
class RangedLayoutTester:
|
||||
def __init__(self):
|
||||
self.fields = KEY_FIELDS["ranged"] + START_FIELDS["ranged"] + MIDDLE_FIELDS + END_FIELDS["ranged"]
|
||||
self.values = VALUES["ranged"]
|
||||
self.data = [Row(self.fields, vals) for vals in self.values]
|
||||
self.layout_to_dictionary = dict()
|
||||
|
||||
def create_dictionaries(self, source_):
|
||||
for layout in LAYOUTS_RANGED:
|
||||
if source_.compatible_with_layout(Layout(layout)):
|
||||
self.layout_to_dictionary[layout] = get_dict(source_, Layout(layout), self.fields)
|
||||
|
||||
def prepare(self, cluster_):
|
||||
for _, dictionary in self.layout_to_dictionary.items():
|
||||
dictionary.prepare_source(cluster_)
|
||||
dictionary.load_data(self.data)
|
||||
|
||||
def execute(self, layout_name, node):
|
||||
|
||||
if layout_name not in self.layout_to_dictionary:
|
||||
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
|
||||
|
||||
dct = self.layout_to_dictionary[layout_name]
|
||||
|
||||
node.query("system reload dictionaries")
|
||||
|
||||
queries_with_answers = []
|
||||
for row in self.data:
|
||||
for field in self.fields:
|
||||
if not field.is_key and not field.is_range:
|
||||
for query in dct.get_select_get_queries(field, row):
|
||||
queries_with_answers.append((query, row.get_value_by_name(field.name)))
|
||||
|
||||
for query, answer in queries_with_answers:
|
||||
# print(query)
|
||||
assert node.query(query) == str(answer) + '\n'
|
||||
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceCassandra
|
||||
|
||||
SOURCE = SourceCassandra("Cassandra", "localhost", "9043", "cassandra1", "9042", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
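# Dictionary .xml configs are generated before the cluster instance is declared, so they can be passed via 'dictionaries'.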
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_cassandra=True)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
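# Module-scoped fixture: starts the cluster once and loads the test data into each dictionary's source.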
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceClickHouse
|
||||
|
||||
SOURCE = SourceClickHouse("LocalClickHouse", "localhost", "9000", "node", "9000", "default", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceClickHouse
|
||||
|
||||
SOURCE = SourceClickHouse("RemoteClickHouse", "localhost", "9000", "clickhouse1", "9000", "default", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
cluster.add_instance('clickhouse1', main_configs=main_configs)
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", list(set(LAYOUTS_SIMPLE).difference(set("cache"))) )
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", list(set(LAYOUTS_COMPLEX).difference(set("complex_key_cache"))))
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,78 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceExecutableCache
|
||||
|
||||
SOURCE = SourceExecutableCache("ExecutableCache", "localhost", "9000", "node", "9000", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", ['cache'])
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", ['complex_key_cache'])
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceExecutableHashed
|
||||
|
||||
SOURCE = SourceExecutableHashed("ExecutableHashed", "localhost", "9000", "node", "9000", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", ['hashed'])
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", ['complex_key_hashed'])
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceFile
|
||||
|
||||
SOURCE = SourceFile("File", "localhost", "9000", "node", "9000", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", set(LAYOUTS_SIMPLE).difference({'cache', 'direct'}) )
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", list(set(LAYOUTS_COMPLEX).difference({'complex_key_cache', 'complex_key_direct'})))
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceHTTP
|
||||
|
||||
SOURCE = SourceHTTP("SourceHTTP", "localhost", "9000", "clickhouse1", "9000", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
cluster.add_instance('clickhouse1', main_configs=main_configs)
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,84 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceHTTPS
|
||||
|
||||
SOURCE = SourceHTTPS("SourceHTTPS", "localhost", "9000", "clickhouse1", "9000", "", "")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
cluster.add_instance('clickhouse1', main_configs=main_configs)
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceMongo
|
||||
|
||||
SOURCE = SourceMongo("MongoDB", "localhost", "27018", "mongo1", "27017", "root", "clickhouse")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -0,0 +1,75 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceMongoURI
|
||||
|
||||
SOURCE = SourceMongoURI("MongoDB_URI", "localhost", "27018", "mongo1", "27017", "root", "clickhouse")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
# See comment in SourceMongoURI
|
||||
@pytest.mark.parametrize("layout_name", ["flat"])
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
@ -0,0 +1,82 @@
|
||||
import os
|
||||
import math
|
||||
import pytest
|
||||
|
||||
from .common import *
|
||||
|
||||
from helpers.cluster import ClickHouseCluster
|
||||
from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout
|
||||
from helpers.external_sources import SourceMySQL
|
||||
|
||||
SOURCE = SourceMySQL("MySQL", "localhost", "3308", "mysql1", "3306", "root", "clickhouse")
|
||||
|
||||
cluster = None
|
||||
node = None
|
||||
simple_tester = None
|
||||
complex_tester = None
|
||||
ranged_tester = None
|
||||
|
||||
|
||||
def setup_module(module):
|
||||
global cluster
|
||||
global node
|
||||
global simple_tester
|
||||
global complex_tester
|
||||
global ranged_tester
|
||||
|
||||
for f in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, f))
|
||||
|
||||
simple_tester = SimpleLayoutTester()
|
||||
simple_tester.create_dictionaries(SOURCE)
|
||||
|
||||
complex_tester = ComplexLayoutTester()
|
||||
complex_tester.create_dictionaries(SOURCE)
|
||||
|
||||
ranged_tester = RangedLayoutTester()
|
||||
ranged_tester.create_dictionaries(SOURCE)
|
||||
# By this point all the dictionary .xml configs have been generated
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
|
||||
dictionaries = []
|
||||
main_configs = []
|
||||
main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml'))
|
||||
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
dictionaries.append(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mysql=True)
|
||||
|
||||
|
||||
def teardown_module(module):
|
||||
global DICT_CONFIG_PATH
|
||||
for fname in os.listdir(DICT_CONFIG_PATH):
|
||||
os.remove(os.path.join(DICT_CONFIG_PATH, fname))
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def started_cluster():
|
||||
try:
|
||||
cluster.start()
|
||||
|
||||
simple_tester.prepare(cluster)
|
||||
complex_tester.prepare(cluster)
|
||||
ranged_tester.prepare(cluster)
|
||||
|
||||
yield cluster
|
||||
|
||||
finally:
|
||||
cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_SIMPLE)
|
||||
def test_simple(started_cluster, layout_name):
|
||||
simple_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_COMPLEX)
|
||||
def test_complex(started_cluster, layout_name):
|
||||
complex_tester.execute(layout_name, node)
|
||||
|
||||
@pytest.mark.parametrize("layout_name", LAYOUTS_RANGED)
|
||||
def test_ranged(started_cluster, layout_name):
|
||||
ranged_tester.execute(layout_name, node)
|
@ -1 +1,2 @@
|
||||
0.1
|
||||
0.1
|
||||
|
@ -6,4 +6,10 @@ INSERT INTO test_joinGet VALUES ('ab', '1', 0.1), ('ab', '2', 0.2), ('cd', '3',
|
||||
|
||||
SELECT joinGet(test_joinGet, 'c', 'ab', '1');
|
||||
|
||||
CREATE TABLE test_lc(a LowCardinality(String), b LowCardinality(String), c Float64) ENGINE = Join(any, left, a, b);
|
||||
|
||||
INSERT INTO test_lc VALUES ('ab', '1', 0.1), ('ab', '2', 0.2), ('cd', '3', 0.3);
|
||||
|
||||
SELECT joinGet(test_lc, 'c', 'ab', '1');
|
||||
|
||||
DROP TABLE test_joinGet;
|
||||
|
@ -1,3 +0,0 @@
|
||||
6
|
||||
6
|
||||
6
|
@ -1,26 +0,0 @@
|
||||
DROP TABLE IF EXISTS t;
|
||||
CREATE TABLE t (x UInt8) ENGINE = TinyLog;
|
||||
|
||||
INSERT INTO t VALUES (1), (2), (3);
|
||||
INSERT INTO t SELECT * FROM t;
|
||||
SELECT count() FROM t;
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
|
||||
CREATE TABLE t (x UInt8) ENGINE = Log;
|
||||
|
||||
INSERT INTO t VALUES (1), (2), (3);
|
||||
INSERT INTO t SELECT * FROM t;
|
||||
SELECT count() FROM t;
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
|
||||
CREATE TABLE t (x UInt8) ENGINE = StripeLog;
|
||||
|
||||
INSERT INTO t VALUES (1), (2), (3);
|
||||
INSERT INTO t SELECT * FROM t;
|
||||
SELECT count() FROM t;
|
||||
|
||||
DROP TABLE t;
|
@ -1,6 +0,0 @@
|
||||
Testing TinyLog
|
||||
Done TinyLog
|
||||
Testing StripeLog
|
||||
Done StripeLog
|
||||
Testing Log
|
||||
Done Log
|
@ -1,85 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
|
||||
function thread_create {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
function thread_drop {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
function thread_rename {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
function thread_select {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
function thread_insert {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
function thread_insert_select {
|
||||
while true; do
|
||||
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|218)'
|
||||
sleep 0.0$RANDOM
|
||||
done
|
||||
}
|
||||
|
||||
export -f thread_create
|
||||
export -f thread_drop
|
||||
export -f thread_rename
|
||||
export -f thread_select
|
||||
export -f thread_insert
|
||||
export -f thread_insert_select
|
||||
|
||||
|
||||
# Do randomized queries and expect nothing extraordinary happens.
|
||||
|
||||
function test_with_engine {
|
||||
echo "Testing $1"
|
||||
|
||||
timeout 10 bash -c "thread_create t1 $1" &
|
||||
timeout 10 bash -c "thread_create t2 $1" &
|
||||
timeout 10 bash -c 'thread_drop t1' &
|
||||
timeout 10 bash -c 'thread_drop t2' &
|
||||
timeout 10 bash -c 'thread_rename t1 t2' &
|
||||
timeout 10 bash -c 'thread_rename t2 t1' &
|
||||
timeout 10 bash -c 'thread_select t1' &
|
||||
timeout 10 bash -c 'thread_select t2' &
|
||||
timeout 10 bash -c 'thread_insert t1 5' &
|
||||
timeout 10 bash -c 'thread_insert t2 10' &
|
||||
timeout 10 bash -c 'thread_insert_select t1 t2' &
|
||||
timeout 10 bash -c 'thread_insert_select t2 t1' &
|
||||
|
||||
wait
|
||||
echo "Done $1"
|
||||
}
|
||||
|
||||
test_with_engine TinyLog
|
||||
test_with_engine StripeLog
|
||||
test_with_engine Log
|
@ -1,12 +0,0 @@
|
||||
DROP TABLE IF EXISTS t_local;
|
||||
DROP TABLE IF EXISTS t_dist;
|
||||
|
||||
create table t_local(a int) engine Log;
|
||||
create table t_dist (a int) engine Distributed(test_shard_localhost, currentDatabase(), 't_local', cityHash64(a));
|
||||
|
||||
set insert_distributed_sync = 1;
|
||||
|
||||
insert into t_dist values (1);
|
||||
|
||||
DROP TABLE t_local;
|
||||
DROP TABLE t_dist;
|
@ -0,0 +1,5 @@
|
||||
Starting clickhouse-server
|
||||
Waiting for clickhouse-server to start
|
||||
1
|
||||
Hello
|
||||
World
|
@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CLICKHOUSE_PORT_TCP=50111
|
||||
CLICKHOUSE_DATABASE=default
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
echo "Starting clickhouse-server"
|
||||
|
||||
$PORT
|
||||
|
||||
$CLICKHOUSE_BINARY server -- --tcp_port "$CLICKHOUSE_PORT_TCP" > server.log 2>&1 &
|
||||
PID=$!
|
||||
|
||||
function finish {
|
||||
kill $PID
|
||||
wait
|
||||
}
|
||||
trap finish EXIT
|
||||
|
||||
echo "Waiting for clickhouse-server to start"
|
||||
|
||||
for i in {1..30}; do
|
||||
sleep 1
|
||||
$CLICKHOUSE_CLIENT --query "SELECT 1" 2>/dev/null && break
|
||||
if [[ $i == 30 ]]; then
|
||||
cat server.log
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Check access rights
|
||||
|
||||
$CLICKHOUSE_CLIENT -n --query "
|
||||
DROP DATABASE IF EXISTS test;
|
||||
CREATE DATABASE test;
|
||||
USE test;
|
||||
|
||||
CREATE TABLE t (s String) ENGINE=TinyLog;
|
||||
INSERT INTO t VALUES ('Hello');
|
||||
SELECT * FROM t;
|
||||
DROP TABLE t;
|
||||
|
||||
CREATE TEMPORARY TABLE t (s String);
|
||||
INSERT INTO t VALUES ('World');
|
||||
SELECT * FROM t;
|
||||
";
|
@ -149,4 +149,3 @@
|
||||
00609_mv_index_in_in
|
||||
00510_materizlized_view_and_deduplication_zookeeper
|
||||
00738_lock_for_inner_table
|
||||
01505_log_distributed_deadlock
|
||||
|
@ -403,7 +403,7 @@ function generate_diagram() {
|
||||
var table_row = "";
|
||||
|
||||
table_row += "<tr>";
|
||||
table_row += "<td class='text-right w-15 align-middle'";
|
||||
table_row += "<td class='text-right text-nowrap w-15 align-middle'";
|
||||
if (filtered_results[j].system_full) {
|
||||
table_row += ' data-toggle="tooltip" data-placement="right" title="' + filtered_results[j].system_full + '"';
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
[
|
||||
{
|
||||
"system": "AWS c5.metal",
|
||||
"system": "AWS c5.metal 100GB",
|
||||
"system_full": "AWS c5.metal 96vCPU 192GiB 100GB SSD",
|
||||
"time": "2020-01-17 00:00:00",
|
||||
"kind": "cloud",
|
website/benchmark/hardware/results/aws_c5metal_300.json
@ -0,0 +1,54 @@
|
||||
[
|
||||
{
|
||||
"system": "AWS c5.metal 300GB",
|
||||
"system_full": "AWS c5.metal 96vCPU 192GiB 300GB SSD",
|
||||
"time": "2020-09-23 00:00:00",
|
||||
"kind": "cloud",
|
||||
"result":
|
||||
[
|
||||
[0.012, 0.002, 0.002],
|
||||
[0.066, 0.018, 0.018],
|
||||
[0.066, 0.028, 0.027],
|
||||
[0.186, 0.033, 0.031],
|
||||
[0.362, 0.095, 0.093],
|
||||
[1.092, 0.141, 0.142],
|
||||
[0.035, 0.020, 0.021],
|
||||
[0.023, 0.018, 0.018],
|
||||
[0.303, 0.176, 0.181],
|
||||
[0.817, 0.198, 0.198],
|
||||
[0.322, 0.091, 0.092],
|
||||
[0.600, 0.098, 0.098],
|
||||
[1.059, 0.265, 0.253],
|
||||
[1.542, 0.318, 0.310],
|
||||
[0.682, 0.286, 0.283],
|
||||
[0.372, 0.320, 0.295],
|
||||
[1.610, 0.832, 0.750],
|
||||
[1.301, 0.492, 0.458],
|
||||
[3.446, 1.361, 1.330],
|
||||
[0.189, 0.050, 0.035],
|
||||
[9.246, 0.338, 0.265],
|
||||
[10.163, 0.277, 0.249],
|
||||
[19.616, 0.663, 0.639],
|
||||
[20.068, 0.418, 0.367],
|
||||
[1.812, 0.097, 0.093],
|
||||
[0.976, 0.090, 0.083],
|
||||
[2.458, 0.097, 0.095],
|
||||
[9.397, 0.344, 0.323],
|
||||
[7.320, 0.415, 0.413],
|
||||
[0.780, 0.753, 0.748],
|
||||
[1.328, 0.226, 0.223],
|
||||
[4.643, 0.339, 0.329],
|
||||
[4.136, 2.049, 2.021],
|
||||
[9.213, 1.080, 0.923],
|
||||
[9.192, 1.019, 0.959],
|
||||
[0.410, 0.360, 0.378],
|
||||
[0.244, 0.155, 0.163],
|
||||
[0.102, 0.077, 0.071],
|
||||
[0.045, 0.055, 0.049],
|
||||
[0.459, 0.318, 0.316],
|
||||
[0.069, 0.033, 0.026],
|
||||
[0.035, 0.027, 0.020],
|
||||
[0.019, 0.009, 0.010]
|
||||
]
|
||||
}
|
||||
]
|
website/benchmark/hardware/results/aws_c6metal.json
@ -0,0 +1,54 @@
|
||||
[
|
||||
{
|
||||
"system": "AWS c6.metal (Graviton 2)",
|
||||
"system_full": "AWS c6.metal (Graviton 2) 64 CPU 128GiB 2x1.7TB local SSD md-RAID-0",
|
||||
"time": "2020-09-23 00:00:00",
|
||||
"kind": "cloud",
|
||||
"result":
|
||||
[
|
||||
[0.004, 0.003, 0.001],
|
||||
[0.085, 0.030, 0.032],
|
||||
[0.029, 0.028, 0.026],
|
||||
[0.047, 0.068, 0.070],
|
||||
[0.090, 0.075, 0.079],
|
||||
[0.140, 0.126, 0.124],
|
||||
[0.018, 0.013, 0.012],
|
||||
[0.032, 0.021, 0.032],
|
||||
[0.154, 0.139, 0.138],
|
||||
[0.204, 0.155, 0.156],
|
||||
[0.101, 0.091, 0.090],
|
||||
[0.104, 0.104, 0.100],
|
||||
[0.223, 0.203, 0.203],
|
||||
[0.273, 0.255, 0.253],
|
||||
[0.232, 0.212, 0.213],
|
||||
[0.230, 0.223, 0.223],
|
||||
[0.506, 0.484, 0.483],
|
||||
[0.334, 0.330, 0.316],
|
||||
[1.139, 1.085, 1.088],
|
||||
[0.065, 0.077, 0.054],
|
||||
[0.484, 0.315, 0.315],
|
||||
[0.545, 0.295, 0.291],
|
||||
[0.980, 0.661, 1.476],
|
||||
[1.415, 1.101, 0.675],
|
||||
[0.150, 0.086, 0.085],
|
||||
[0.094, 0.077, 0.078],
|
||||
[0.150, 0.087, 0.086],
|
||||
[0.478, 0.348, 0.346],
|
||||
[0.424, 0.403, 0.399],
|
||||
[1.435, 1.388, 1.417],
|
||||
[0.215, 0.178, 0.178],
|
||||
[0.378, 0.294, 0.289],
|
||||
[1.669, 1.590, 1.596],
|
||||
[1.105, 1.007, 1.010],
|
||||
[1.074, 1.041, 1.014],
|
||||
[0.339, 0.323, 0.323],
|
||||
[0.210, 0.199, 0.204],
|
||||
[0.096, 0.091, 0.092],
|
||||
[0.084, 0.080, 0.079],
|
||||
[0.425, 0.405, 0.423],
|
||||
[0.034, 0.025, 0.022],
|
||||
[0.022, 0.019, 0.018],
|
||||
[0.007, 0.007, 0.007]
|
||||
]
|
||||
}
|
||||
]
|
@ -1,7 +1,7 @@
|
||||
[
|
||||
{
|
||||
"system": "AWS m6g.16xlarge",
|
||||
"system_full": "AWS m6g.16xlarge (Graviton2) 64 vCPU, 256 GiB RAM, EBS",
|
||||
"system": "AWS m6g.16xlarge (Graviton 2)",
|
||||
"system_full": "AWS m6g.16xlarge (Graviton 2) 64 vCPU, 256 GiB RAM, EBS",
|
||||
"time": "2020-02-13 00:00:00",
|
||||
"kind": "cloud",
|
||||
"result":
|
website/benchmark/hardware/results/xeon_e5_2650_4hdd.json
@ -0,0 +1,54 @@
|
||||
[
|
||||
{
|
||||
"system": "Xeon E5-2650",
|
||||
"system_full": "Xeon E5-2650 v2 @ 2.60GHz, 2 sockets, 16 threads, 4xHDD RAID-10",
|
||||
"time": "2020-09-25 00:00:00",
|
||||
"kind": "server",
|
||||
"result":
|
||||
[
|
||||
[0.040, 0.002, 0.002],
|
||||
[0.698, 0.014, 0.013],
|
||||
[0.533, 0.030, 0.030],
|
||||
[0.700, 0.043, 0.046],
|
||||
[0.749, 0.108, 0.102],
|
||||
[1.350, 0.221, 0.259],
|
||||
[0.168, 0.020, 0.020],
|
||||
[0.096, 0.013, 0.013],
|
||||
[1.132, 0.406, 0.386],
|
||||
[1.279, 0.426, 0.440],
|
||||
[0.842, 0.153, 0.146],
|
||||
[1.042, 0.186, 0.182],
|
||||
[1.149, 0.536, 0.533],
|
||||
[1.734, 0.688, 0.683],
|
||||
[1.481, 0.688, 0.651],
|
||||
[1.100, 0.709, 0.700],
|
||||
[2.367, 1.668, 1.682],
|
||||
[1.687, 1.013, 0.988],
|
||||
[4.768, 3.647, 3.783],
|
||||
[0.599, 0.055, 0.040],
|
||||
[5.530, 0.646, 0.622],
|
||||
[6.658, 0.671, 0.648],
|
||||
[11.795, 1.645, 1.574],
|
||||
[19.248, 1.168, 0.906],
|
||||
[1.826, 0.224, 0.232],
|
||||
[0.964, 0.189, 0.187],
|
||||
[2.058, 0.234, 0.215],
|
||||
[5.811, 0.758, 0.704],
|
||||
[4.805, 1.014, 0.995],
|
||||
[2.272, 2.035, 1.838],
|
||||
[1.827, 0.546, 0.547],
|
||||
[3.643, 0.863, 0.834],
|
||||
[5.816, 5.069, 5.168],
|
||||
[6.585, 2.655, 2.756],
|
||||
[6.949, 2.681, 2.795],
|
||||
[1.325, 1.090, 1.072],
|
||||
[0.460, 0.183, 0.179],
|
||||
[1.000, 0.087, 0.091],
|
||||
[0.142, 0.051, 0.038],
|
||||
[0.808, 0.392, 0.391],
|
||||
[0.256, 0.021, 0.015],
|
||||
[0.132, 0.038, 0.012],
|
||||
[0.054, 0.006, 0.006]
|
||||
]
|
||||
}
|
||||
]
|
@ -0,0 +1,55 @@
|
||||
[
|
||||
{
|
||||
"system": "Yandex Cloud 32vCPU",
|
||||
"system_full": "Yandex Cloud Cascade Lake, 32 vCPU, 128 GB RAM, 300 GB SSD",
|
||||
"cpu_vendor": "Intel",
|
||||
"time": "2020-09-23 00:00:00",
|
||||
"kind": "cloud",
|
||||
"result":
|
||||
[
|
||||
[0.021, 0.001, 0.001],
|
||||
[0.051, 0.011, 0.010],
|
||||
[0.396, 0.025, 0.025],
|
||||
[1.400, 0.035, 0.033],
|
||||
[1.413, 0.095, 0.098],
|
||||
[2.272, 0.222, 0.208],
|
||||
[0.042, 0.014, 0.014],
|
||||
[0.024, 0.011, 0.010],
|
||||
[1.948, 0.311, 0.303],
|
||||
[2.267, 0.379, 0.348],
|
||||
[1.498, 0.138, 0.135],
|
||||
[1.563, 0.164, 0.155],
|
||||
[2.435, 0.544, 0.516],
|
||||
[3.937, 0.661, 0.659],
|
||||
[2.724, 0.727, 0.642],
|
||||
[1.795, 0.683, 0.641],
|
||||
[4.668, 1.682, 1.643],
|
||||
[3.802, 1.051, 0.895],
|
||||
[8.297, 3.835, 4.592],
|
||||
[1.427, 0.100, 0.033],
|
||||
[16.816, 0.652, 0.547],
|
||||
[19.159, 0.650, 0.532],
|
||||
[35.374, 1.538, 1.311],
|
||||
[32.736, 0.854, 0.699],
|
||||
[4.767, 0.203, 0.184],
|
||||
[2.249, 0.166, 0.158],
|
||||
[4.759, 0.207, 0.189],
|
||||
[16.826, 0.584, 0.529],
|
||||
[14.308, 0.920, 0.789],
|
||||
[1.137, 1.041, 0.992],
|
||||
[3.967, 0.545, 0.555],
|
||||
[9.196, 0.872, 0.789],
|
||||
[9.554, 5.501, 5.694],
|
||||
[17.810, 2.712, 2.329],
|
||||
[17.726, 2.653, 2.793],
|
||||
[1.260, 0.955, 0.978],
|
||||
[0.260, 0.171, 0.164],
|
||||
[0.092, 0.065, 0.069],
|
||||
[0.046, 0.041, 0.037],
|
||||
[0.475, 0.391, 0.383],
|
||||
[0.066, 0.021, 0.019],
|
||||
[0.023, 0.024, 0.011],
|
||||
[0.022, 0.005, 0.005]
|
||||
]
|
||||
}
|
||||
]
|
@ -0,0 +1,55 @@
|
||||
[
|
||||
{
|
||||
"system": "Yandex Cloud 80vCPU",
|
||||
"system_full": "Yandex Cloud Cascade Lake, 80 vCPU, 160 GB RAM, 4TB SSD",
|
||||
"cpu_vendor": "Intel",
|
||||
"time": "2020-09-23 00:00:00",
|
||||
"kind": "cloud",
|
||||
"result":
|
||||
[
|
||||
[0.024, 0.002, 0.002],
|
||||
[0.067, 0.012, 0.012],
|
||||
[0.104, 0.017, 0.017],
|
||||
[0.411, 0.020, 0.021],
|
||||
[0.577, 0.069, 0.068],
|
||||
[0.739, 0.123, 0.122],
|
||||
[0.038, 0.015, 0.014],
|
||||
[0.024, 0.012, 0.012],
|
||||
[0.625, 0.169, 0.168],
|
||||
[0.748, 0.216, 0.207],
|
||||
[0.471, 0.089, 0.082],
|
||||
[0.487, 0.092, 0.087],
|
||||
[0.818, 0.256, 0.245],
|
||||
[1.324, 0.352, 0.352],
|
||||
[0.927, 0.333, 0.319],
|
||||
[0.642, 0.376, 0.377],
|
||||
[1.686, 0.983, 0.959],
|
||||
[1.290, 0.588, 0.582],
|
||||
[3.105, 1.793, 1.818],
|
||||
[0.426, 0.031, 0.034],
|
||||
[5.559, 0.415, 0.344],
|
||||
[6.343, 0.435, 0.405],
|
||||
[11.779, 1.151, 1.101],
|
||||
[11.851, 0.537, 0.509],
|
||||
[1.530, 0.125, 0.126],
|
||||
[0.695, 0.103, 0.103],
|
||||
[1.531, 0.127, 0.119],
|
||||
[5.576, 0.541, 0.496],
|
||||
[4.718, 0.740, 0.719],
|
||||
[1.429, 1.467, 1.500],
|
||||
[1.309, 0.335, 0.322],
|
||||
[3.138, 0.505, 0.518],
|
||||
[5.481, 3.475, 3.512],
|
||||
[6.330, 1.877, 1.818],
|
||||
[6.238, 1.843, 1.813],
|
||||
[0.660, 0.626, 0.603],
|
||||
[0.251, 0.152, 0.151],
|
||||
[0.090, 0.058, 0.059],
|
||||
[0.041, 0.038, 0.034],
|
||||
[0.470, 0.376, 0.385],
|
||||
[0.076, 0.015, 0.018],
|
||||
[0.030, 0.018, 0.010],
|
||||
[0.024, 0.006, 0.005]
|
||||
]
|
||||
}
|
||||
]
|