diff --git a/contrib/FP16-cmake/CMakeLists.txt b/contrib/FP16-cmake/CMakeLists.txt
index f82ad705dcc..be67ee88e8b 100644
--- a/contrib/FP16-cmake/CMakeLists.txt
+++ b/contrib/FP16-cmake/CMakeLists.txt
@@ -1 +1,4 @@
-# See contrib/usearch-cmake/CMakeLists.txt
+set (FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16/")
+
+add_library(_fp16 INTERFACE)
+target_include_directories(_fp16 SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include)
diff --git a/contrib/SimSIMD b/contrib/SimSIMD
index 91a76d1ac51..ff51434d90c 160000
--- a/contrib/SimSIMD
+++ b/contrib/SimSIMD
@@ -1 +1 @@
-Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26
+Subproject commit ff51434d90c66f916e94ff05b24530b127aa4cff
diff --git a/contrib/SimSIMD-cmake/CMakeLists.txt b/contrib/SimSIMD-cmake/CMakeLists.txt
index f82ad705dcc..f5dc4d63604 100644
--- a/contrib/SimSIMD-cmake/CMakeLists.txt
+++ b/contrib/SimSIMD-cmake/CMakeLists.txt
@@ -1 +1,4 @@
-# See contrib/usearch-cmake/CMakeLists.txt
+set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
+
+add_library(_simsimd INTERFACE)
+target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include")
diff --git a/contrib/usearch b/contrib/usearch
index 7a8967cb442..d1d33eac94a 160000
--- a/contrib/usearch
+++ b/contrib/usearch
@@ -1 +1 @@
-Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
+Subproject commit d1d33eac94acd3b628e0b446c927ec3295ef63c7
diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt
index df131e0c528..25f6ca82a74 100644
--- a/contrib/usearch-cmake/CMakeLists.txt
+++ b/contrib/usearch-cmake/CMakeLists.txt
@@ -1,14 +1,9 @@
-set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
-set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
 set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
 
 add_library(_usearch INTERFACE)
+target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/include)
 
-target_include_directories(_usearch SYSTEM INTERFACE
-    ${FP16_PROJECT_DIR}/include
-    ${SIMSIMD_PROJECT_DIR}/include
-    ${USEARCH_PROJECT_DIR}/include)
-
+target_link_libraries(_usearch INTERFACE _fp16)
 target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
 
 # target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile
index 7825e3edd98..dfe6a420260 100644
--- a/docker/keeper/Dockerfile
+++ b/docker/keeper/Dockerfile
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine
index 6a33023592c..991c25ad142 100644
--- a/docker/server/Dockerfile.alpine
+++ b/docker/server/Dockerfile.alpine
@@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu
index f7c80286fe3..5dc88b49e31 100644
--- a/docker/server/Dockerfile.ubuntu
+++ b/docker/server/Dockerfile.ubuntu
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
diff --git a/docker/test/integration/runner/misc/rabbitmq/enabled_plugins b/docker/test/integration/runner/misc/rabbitmq/enabled_plugins
new file mode 100644
index 00000000000..a30892ff929
--- /dev/null
+++ b/docker/test/integration/runner/misc/rabbitmq/enabled_plugins
@@ -0,0 +1 @@
+[rabbitmq_consistent_hash_exchange].
\ No newline at end of file
diff --git a/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf
index 258a282907a..6da3758b08d 100644
--- a/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf
+++ b/docker/test/integration/runner/misc/rabbitmq/rabbitmq.conf
@@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
 ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
 ssl_options.certfile = /etc/rabbitmq/server-cert.pem
 ssl_options.keyfile = /etc/rabbitmq/server-key.pem
+
+vm_memory_high_watermark.absolute = 2GB
diff --git a/docs/changelogs/v24.9.2.42-stable.md b/docs/changelogs/v24.9.2.42-stable.md
new file mode 100644
index 00000000000..c6754cfc303
--- /dev/null
+++ b/docs/changelogs/v24.9.2.42-stable.md
@@ -0,0 +1,33 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.9.2.42-stable (de7c791a2ea) FIXME as compared to v24.9.1.3278-stable (6d058d82a8e)
+
+#### Improvement
+* Backported in [#70091](https://github.com/ClickHouse/ClickHouse/issues/70091): Add `show_create_query_identifier_quoting_rule` to define identifier quoting behavior of the show create query result. Possible values: - `user_display`: When the identifier is a keyword. - `when_necessary`: When the identifier is one of `{"distinct", "all", "table"}`, or it can cause ambiguity: column names, dictionary attribute names. - `always`: Always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)).
+* Backported in [#70100](https://github.com/ClickHouse/ClickHouse/issues/70100): Follow-up to https://github.com/ClickHouse/ClickHouse/pull/69346. Point 4 described there will work now as well. [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#70048](https://github.com/ClickHouse/ClickHouse/issues/70048): Add new column readonly_duration to the system.replicas table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#70193](https://github.com/ClickHouse/ClickHouse/issues/70193): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
+* Backported in [#70083](https://github.com/ClickHouse/ClickHouse/issues/70083): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
+* Backported in [#70070](https://github.com/ClickHouse/ClickHouse/issues/70070): Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#70168](https://github.com/ClickHouse/ClickHouse/issues/70168): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#70238](https://github.com/ClickHouse/ClickHouse/issues/70238): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70205](https://github.com/ClickHouse/ClickHouse/issues/70205): Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#70185](https://github.com/ClickHouse/ClickHouse/issues/70185): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70253](https://github.com/ClickHouse/ClickHouse/issues/70253): Fix possible hang in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70230](https://github.com/ClickHouse/ClickHouse/issues/70230): Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70217](https://github.com/ClickHouse/ClickHouse/issues/70217): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#70267](https://github.com/ClickHouse/ClickHouse/issues/70267): Respect setting allow_simdjson in the JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#70052](https://github.com/ClickHouse/ClickHouse/issues/70052): Improve stateless test runner. [#69864](https://github.com/ClickHouse/ClickHouse/pull/69864) ([Alexey Katsman](https://github.com/alexkats)).
+* Backported in [#70284](https://github.com/ClickHouse/ClickHouse/issues/70284): Improve pipdeptree generator for docker images. - Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#70261](https://github.com/ClickHouse/ClickHouse/issues/70261): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
+
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index 8a106720ee0..4863858358d 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -1057,12 +1057,12 @@ Default value: throw
 
 ## deduplicate_merge_projection_mode
 
-Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
+Whether to allow creating projections for tables with a non-classic MergeTree engine, that is, a (Replicated, Shared) MergeTree. The `ignore` option exists purely for compatibility and may produce incorrect results. Otherwise, if projections are allowed, it defines the action taken when merging them: either drop or rebuild. Classic MergeTree ignores this setting.
 It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members. Similar to the option `lightweight_mutation_projection_mode`, it is also part level.
 
 Possible values:
 
-- throw, drop, rebuild
+- ignore, throw, drop, rebuild
 
 Default value: throw
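+
+A minimal sketch of a table that sets this mode explicitly (the table and projection are hypothetical; it assumes a Replicated engine with default replication arguments configured):
+
+```sql
+CREATE TABLE tab
+(
+    id UInt64,
+    city String,
+    PROJECTION proj (SELECT city, count() GROUP BY city)
+)
+ENGINE = ReplicatedMergeTree
+ORDER BY id
+SETTINGS deduplicate_merge_projection_mode = 'drop';
+```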
diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md
index 9416036aff1..0623e209852 100644
--- a/docs/en/sql-reference/functions/ip-address-functions.md
+++ b/docs/en/sql-reference/functions/ip-address-functions.md
@@ -316,6 +316,38 @@ Result:
 
 Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns null.
 
+**Syntax**
+
+```sql
+toIPv4OrNull(value)
+```
+
+**Arguments**
+
+- `value` — A string containing an IPv4 address.
+
+**Returned value**
+
+- `value` converted to an IPv4 address, or `NULL` if `value` is not a valid IPv4 address. [Nullable(IPv4)](../data-types/ipv4.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toIPv4OrNull('192.168.0.1') AS s1,
+    toIPv4OrNull('192.168.0') AS s2
+```
+
+Result:
+
+```response
+┌─s1──────────┬─s2───┐
+│ 192.168.0.1 │ ᴺᵁᴸᴸ │
+└─────────────┴──────┘
+```
+
 ## toIPv6OrDefault(string)
 
 Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns `::` (0 IPv6).
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 24489f2b7e0..8ab9a9d73b0 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -207,7 +207,31 @@ If `NULL` is passed, then the function returns type `Nullable(Nothing)`, which c
 **Syntax**
 
 ```sql
-toTypeName(x)
+toTypeName(value)
+```
+
+**Arguments**
+
+- `value` — A value of arbitrary type.
+
+**Returned value**
+
+- The name of the data type of `value`. [String](../data-types/string.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT toTypeName(123);
+```
+
+Result:
+
+```response
+┌─toTypeName(123)─┐
+│ UInt8           │
+└─────────────────┘
 ```
 
 ## blockSize {#blockSize}
diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md
index 2e9b0cf3080..29df041ccc6 100644
--- a/docs/en/sql-reference/statements/alter/column.md
+++ b/docs/en/sql-reference/statements/alter/column.md
@@ -272,8 +272,7 @@ ALTER TABLE table_name MODIFY COLUMN column_name RESET SETTING max_compress_bloc
 
 ## MATERIALIZE COLUMN
 
-Materializes a column with a `DEFAULT` or `MATERIALIZED` value expression.
-This statement can be used to rewrite existing column data after a `DEFAULT` or `MATERIALIZED` expression has been added or updated (which only updates the metadata but does not change existing data).
+Materializes a column with a `DEFAULT` or `MATERIALIZED` value expression. When adding a materialized column using `ALTER TABLE table_name ADD COLUMN column_name MATERIALIZED`, existing rows without materialized values are not automatically filled. The `MATERIALIZE COLUMN` statement can be used to rewrite existing column data after a `DEFAULT` or `MATERIALIZED` expression has been added or updated (which only updates the metadata but does not change existing data).
 Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
 
 For columns with a new or updated `MATERIALIZED` value expression, all existing rows are rewritten.
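+
+A minimal sketch of the workflow (the table and column names are hypothetical):
+
+```sql
+ALTER TABLE tab ADD COLUMN doubled UInt64 MATERIALIZED value * 2;
+-- Existing rows do not have `doubled` filled in yet; rewrite them:
+ALTER TABLE tab MATERIALIZE COLUMN doubled;
+```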
diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md
index 9d94f040648..54a31710050 100644
--- a/docs/en/sql-reference/statements/create/view.md
+++ b/docs/en/sql-reference/statements/create/view.md
@@ -135,15 +135,15 @@ To change SQL security for an existing view, use
 ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
 ```
 
-### Examples sql security
+### Examples
 ```sql
-CREATE test_view
+CREATE VIEW test_view
 DEFINER = alice SQL SECURITY DEFINER
 AS SELECT ...
 ```
 
 ```sql
-CREATE test_view
+CREATE VIEW test_view
 SQL SECURITY INVOKER
 AS SELECT ...
 ```
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 1fba973c953..317a5b9a8fb 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -628,7 +628,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
             auto condition_write_buffer = WriteBufferFromOwnString();
 
             LOG_DEBUG(log, "Checking startup query condition `{}`", condition);
-            executeQuery(condition_read_buffer, condition_write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
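+            /// executeQuery() needs a query-scoped context, so copy the server context and promote it to a query context.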
+            auto startup_context = Context::createCopy(context);
+            startup_context->makeQueryContext();
+            executeQuery(condition_read_buffer, condition_write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
 
             auto result = condition_write_buffer.str();
 
@@ -648,7 +650,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
             auto write_buffer = WriteBufferFromOwnString();
 
             LOG_DEBUG(log, "Executing query `{}`", query);
-            executeQuery(read_buffer, write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
+            auto startup_context = Context::createCopy(context);
+            startup_context->makeQueryContext();
+            executeQuery(read_buffer, write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
         }
     }
     catch (...)
diff --git a/pyproject.toml b/pyproject.toml
index 25228daf8f7..69463d32483 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,19 +13,19 @@ max-statements=200
 [tool.pylint.'MESSAGES CONTROL']
 # pytest.mark.parametrize is not callable (not-callable)
 disable = '''
-    missing-docstring,
-    too-few-public-methods,
-    invalid-name,
-    too-many-arguments,
-    too-many-locals,
-    too-many-instance-attributes,
+    bare-except,
+    broad-except,
     cell-var-from-loop,
     fixme,
+    invalid-name,
+    missing-docstring,
+    redefined-outer-name,
+    too-few-public-methods,
+    too-many-arguments,
+    too-many-instance-attributes,
+    too-many-locals,
     too-many-public-methods,
     wildcard-import,
-    redefined-outer-name,
-    broad-except,
-    bare-except,
 '''
 
 [tool.isort]
diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp
index 8e656372637..093f1a19618 100644
--- a/src/Access/AccessControl.cpp
+++ b/src/Access/AccessControl.cpp
@@ -623,7 +623,7 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po
     /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons,
     /// only the log will show the exact reason.
     throw Exception(PreformattedMessage{message.str(),
-                                        "{}: Authentication failed: password is incorrect, or there is no user with such name.{}",
+                                        "{}: Authentication failed: password is incorrect, or there is no user with such name",
                                         std::vector<String>{credentials.getUserName()}},
                     ErrorCodes::AUTHENTICATION_FAILED);
 }
diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h
index 35a23fc9e78..bb1f012e785 100644
--- a/src/Coordination/CoordinationSettings.h
+++ b/src/Coordination/CoordinationSettings.h
@@ -54,6 +54,7 @@ struct Settings;
     M(UInt64, log_file_overallocate_size, 50 * 1024 * 1024, "If max_log_file_size is not set to 0, this value will be added to it for preallocating bytes on disk. If a log record is larger than this value, it could lead to uncaught out-of-space issues so a larger value is preferred", 0) \
     M(UInt64, min_request_size_for_cache, 50 * 1024, "Minimal size of the request to cache the deserialization result. Caching can have negative effect on latency for smaller requests, set to 0 to disable", 0) \
     M(UInt64, raft_limits_reconnect_limit, 50, "If connection to a peer is silent longer than this limit * (multiplied by heartbeat interval), we re-establish the connection.", 0) \
+    M(UInt64, raft_limits_response_limit, 20, "Total wait time for a response is calculated by multiplying response_limit with heart_beat_interval_ms", 0) \
    M(Bool, async_replication, false, "Enable async replication. All write and read guarantees are preserved while better performance is achieved. Settings is disabled by default to not break backwards compatibility.", 0) \
     M(Bool, experimental_use_rocksdb, false, "Use rocksdb as backend storage", 0) \
     M(UInt64, latest_logs_cache_size_threshold, 1 * 1024 * 1024 * 1024, "Maximum total size of in-memory cache of latest log entries.", 0) \
diff --git a/src/Coordination/KeeperContext.cpp b/src/Coordination/KeeperContext.cpp
index dd2c1d59d56..6ae4a1ee61d 100644
--- a/src/Coordination/KeeperContext.cpp
+++ b/src/Coordination/KeeperContext.cpp
@@ -411,7 +411,9 @@ KeeperContext::Storage KeeperContext::getLogsPathFromConfig(const Poco::Util::Ab
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalLogDisk", path);
+        auto disk = std::make_shared<DiskLocal>("LocalLogDisk", path);
+        disk->startup(Context::getGlobalContextInstance(), false);
+        return disk;
     };
 
     /// the most specialized path
@@ -437,7 +439,9 @@ KeeperContext::Storage KeeperContext::getSnapshotsPathFromConfig(const Poco::Uti
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalSnapshotDisk", path);
+        auto disk = std::make_shared<DiskLocal>("LocalSnapshotDisk", path);
+        disk->startup(Context::getGlobalContextInstance(), false);
+        return disk;
     };
 
     /// the most specialized path
@@ -463,7 +467,9 @@ KeeperContext::Storage KeeperContext::getStatePathFromConfig(const Poco::Util::A
         if (!fs::exists(path))
             fs::create_directories(path);
 
-        return std::make_shared<DiskLocal>("LocalStateFileDisk", path);
+        auto disk = std::make_shared<DiskLocal>("LocalStateFileDisk", path);
+        disk->startup(Context::getGlobalContextInstance(), false);
+        return disk;
     };
 
     if (config.has("keeper_server.state_storage_disk"))
diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp
index b005ecf5e1e..57b15245f19 100644
--- a/src/Coordination/KeeperServer.cpp
+++ b/src/Coordination/KeeperServer.cpp
@@ -506,6 +506,7 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co
 
     nuraft::raft_server::limits raft_limits;
     raft_limits.reconnect_limit_ = getValueOrMaxInt32AndLogWarning(coordination_settings->raft_limits_reconnect_limit, "raft_limits_reconnect_limit", log);
+    raft_limits.response_limit_ = getValueOrMaxInt32AndLogWarning(coordination_settings->raft_limits_response_limit, "response_limit", log);
     raft_instance->set_raft_limits(raft_limits);
 
     raft_instance->start_server(init_options.skip_initial_election_timeout_);
@@ -1079,7 +1080,7 @@ ClusterUpdateActions KeeperServer::getRaftConfigurationDiff(const Poco::Util::Ab
 
 void KeeperServer::applyConfigUpdateWithReconfigDisabled(const ClusterUpdateAction& action)
 {
-    std::lock_guard _{server_write_mutex};
+    std::unique_lock server_write_lock{server_write_mutex};
     if (is_recovering) return;
     constexpr auto sleep_time = 500ms;
@@ -1090,7 +1091,9 @@ void KeeperServer::applyConfigUpdateWithReconfigDisabled(const ClusterUpdateActi
     auto backoff_on_refusal = [&](size_t i)
     {
         LOG_INFO(log, "Update was not accepted (try {}), backing off for {}", i + 1, sleep_time * (i + 1));
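+        /// Don't hold the write lock while backing off, otherwise other writers are blocked for the whole sleep.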
+        server_write_lock.unlock();
         std::this_thread::sleep_for(sleep_time * (i + 1));
+        server_write_lock.lock();
     };
 
     const auto & coordination_settings = keeper_context->getCoordinationSettings();
diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp
index e4887a2ec0a..e2e15b423c8 100644
--- a/src/Core/SettingsEnums.cpp
+++ b/src/Core/SettingsEnums.cpp
@@ -178,7 +178,8 @@ IMPLEMENT_SETTING_ENUM(LightweightMutationProjectionMode, ErrorCodes::BAD_ARGUME
                         {"rebuild", LightweightMutationProjectionMode::REBUILD}})
 
 IMPLEMENT_SETTING_ENUM(DeduplicateMergeProjectionMode, ErrorCodes::BAD_ARGUMENTS,
-                       {{"throw", DeduplicateMergeProjectionMode::THROW},
+                       {{"ignore", DeduplicateMergeProjectionMode::IGNORE},
+                        {"throw", DeduplicateMergeProjectionMode::THROW},
                         {"drop", DeduplicateMergeProjectionMode::DROP},
                         {"rebuild", DeduplicateMergeProjectionMode::REBUILD}})
diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h
index 08778ae5a49..7e6d26e5823 100644
--- a/src/Core/SettingsEnums.h
+++ b/src/Core/SettingsEnums.h
@@ -314,6 +314,7 @@ DECLARE_SETTING_ENUM(LightweightMutationProjectionMode)
 
 enum class DeduplicateMergeProjectionMode : uint8_t
 {
+    IGNORE,
     THROW,
     DROP,
     REBUILD,
diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
index c96f5f0c931..7055a7018ce 100644
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
+++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
@@ -11,20 +11,6 @@
 
 using namespace DB;
 
-
-namespace
-{
-    bool withFileCache(const ReadSettings & settings)
-    {
-        return settings.remote_fs_cache && settings.enable_filesystem_cache;
-    }
-
-    bool withPageCache(const ReadSettings & settings, bool with_file_cache)
-    {
-        return settings.page_cache && !with_file_cache && settings.use_page_cache_for_disks_without_file_cache;
-    }
-}
-
 namespace DB
 {
 namespace ErrorCodes
@@ -35,7 +21,7 @@ namespace ErrorCodes
 size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_t file_size)
 {
     /// Only when cache is used we could download bigger portions of FileSegments than what we actually gonna read within particular task.
-    if (!withFileCache(settings))
+    if (!settings.enable_filesystem_cache)
         return settings.remote_fs_buffer_size;
 
     /// Buffers used for prefetch and pre-download better to have enough size, but not bigger than the whole file.
@@ -45,7 +31,6 @@ size_t chooseBufferSizeForRemoteReading(const DB::ReadSettings & settings, size_
 ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
     ReadBufferCreator && read_buffer_creator_,
     const StoredObjects & blobs_to_read_,
-    const std::string & cache_path_prefix_,
     const ReadSettings & settings_,
     std::shared_ptr<FilesystemCacheLog> cache_log_,
     bool use_external_buffer_)
@@ -54,12 +39,10 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
     , settings(settings_)
     , blobs_to_read(blobs_to_read_)
     , read_buffer_creator(std::move(read_buffer_creator_))
-    , cache_path_prefix(cache_path_prefix_)
     , cache_log(settings.enable_filesystem_cache_log ? cache_log_ : nullptr)
     , query_id(CurrentThread::getQueryId())
     , use_external_buffer(use_external_buffer_)
-    , with_file_cache(withFileCache(settings))
-    , with_page_cache(withPageCache(settings, with_file_cache))
+    , with_file_cache(settings.enable_filesystem_cache)
     , log(getLogger("ReadBufferFromRemoteFSGather"))
 {
     if (!blobs_to_read.empty())
@@ -74,47 +57,7 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c
     }
 
     current_object = object;
-    const auto & object_path = object.remote_path;
-
-    std::unique_ptr<ReadBufferFromFileBase> buf;
-
-    if (with_file_cache)
-    {
-        if (settings.remote_fs_cache->isInitialized())
-        {
-            auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path);
-            buf = std::make_unique<CachedOnDiskReadBufferFromFile>(
-                object_path,
-                cache_key,
-                settings.remote_fs_cache,
-                FileCache::getCommonUser(),
-                [=, this]() { return read_buffer_creator(/* restricted_seek */true, object); },
-                settings,
-                query_id,
-                object.bytes_size,
-                /* allow_seeks */false,
-                /* use_external_buffer */true,
-                /* read_until_position */std::nullopt,
-                cache_log);
-        }
-        else
-        {
-            settings.remote_fs_cache->throwInitExceptionIfNeeded();
-        }
-    }
-
-    /// Can't wrap CachedOnDiskReadBufferFromFile in CachedInMemoryReadBufferFromFile because the
-    /// former doesn't support seeks.
-    if (with_page_cache && !buf)
-    {
-        auto inner = read_buffer_creator(/* restricted_seek */false, object);
-        auto cache_key = FileChunkAddress { .path = cache_path_prefix + object_path };
-        buf = std::make_unique<CachedInMemoryReadBufferFromFile>(
-            cache_key, settings.page_cache, std::move(inner), settings);
-    }
-
-    if (!buf)
-        buf = read_buffer_creator(/* restricted_seek */true, object);
+    auto buf = read_buffer_creator(/* restricted_seek */true, object);
 
     if (read_until_position > start_offset && read_until_position < start_offset + object.bytes_size)
         buf->setReadUntilPosition(read_until_position - start_offset);
diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h
index 9f1cb681f1a..27f94a3e552 100644
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h
+++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h
@@ -26,7 +26,6 @@ public:
     ReadBufferFromRemoteFSGather(
         ReadBufferCreator && read_buffer_creator_,
         const StoredObjects & blobs_to_read_,
-        const std::string & cache_path_prefix_,
         const ReadSettings & settings_,
         std::shared_ptr<FilesystemCacheLog> cache_log_,
         bool use_external_buffer_);
@@ -71,12 +70,10 @@ private:
     const ReadSettings settings;
     const StoredObjects blobs_to_read;
     const ReadBufferCreator read_buffer_creator;
-    const std::string cache_path_prefix;
     const std::shared_ptr<FilesystemCacheLog> cache_log;
     const String query_id;
     const bool use_external_buffer;
     const bool with_file_cache;
-    const bool with_page_cache;
 
     size_t read_until_position = 0;
     size_t file_offset_of_buffer_end = 0;
diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp
index fa48825e1a6..673c82806bd 100644
--- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp
@@ -210,63 +210,14 @@ std::unique_ptr<ReadBufferFromFileBase> AzureObjectStorage::readObject( /// NOLI
     auto settings_ptr = settings.get();
 
     return std::make_unique<ReadBufferFromAzureBlobStorage>(
-        client.get(), object.remote_path, patchSettings(read_settings), settings_ptr->max_single_read_retries,
-        settings_ptr->max_single_download_retries);
-}
-
-std::unique_ptr<ReadBufferFromFileBase> AzureObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t>,
-    std::optional<size_t>) const
-{
-    ReadSettings disk_read_settings = patchSettings(read_settings);
-    auto settings_ptr = settings.get();
-    auto global_context = Context::getGlobalContextInstance();
-
-    auto read_buffer_creator =
-        [this, settings_ptr, disk_read_settings]
-        (bool restricted_seek, const StoredObject & object_) -> std::unique_ptr<ReadBufferFromFileBase>
-    {
-        return std::make_unique<ReadBufferFromAzureBlobStorage>(
-            client.get(),
-            object_.remote_path,
-            disk_read_settings,
-            settings_ptr->max_single_read_retries,
-            settings_ptr->max_single_download_retries,
-            /* use_external_buffer */true,
-            restricted_seek);
-    };
-
-    switch (read_settings.remote_fs_method)
-    {
-        case RemoteFSReadMethod::read:
-        {
-            return std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                objects,
-                "azure:",
-                disk_read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */false);
-        }
-        case RemoteFSReadMethod::threadpool:
-        {
-            auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                objects,
-                "azure:",
-                disk_read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */true);
-
-            auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
-            return std::make_unique<AsynchronousBoundedReadBuffer>(
-                std::move(impl), reader, disk_read_settings,
-                global_context->getAsyncReadCounters(),
-                global_context->getFilesystemReadPrefetchesLog());
-        }
-    }
+        client.get(),
+        object.remote_path,
+        patchSettings(read_settings),
+        settings_ptr->max_single_read_retries,
+        settings_ptr->max_single_download_retries,
+        read_settings.remote_read_buffer_use_external_buffer,
+        read_settings.remote_read_buffer_restrict_seek,
+        /* read_until_position */0);
 }
 
 /// Open the file for write and return WriteBufferFromFileBase object.
diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h
index 15a0bfb9ac1..58225eccd90 100644
--- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h
+++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h
@@ -51,12 +51,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp
index ab0d357119c..a59ee615454 100644
--- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp
@@ -48,9 +48,7 @@ CachedObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string &
 
 ReadSettings CachedObjectStorage::patchSettings(const ReadSettings & read_settings) const
 {
-    ReadSettings modified_settings{read_settings};
-    modified_settings.remote_fs_cache = cache;
-    return object_storage->patchSettings(modified_settings);
+    return object_storage->patchSettings(read_settings);
 }
 
 void CachedObjectStorage::startup()
@@ -63,21 +61,45 @@ bool CachedObjectStorage::exists(const StoredObject & object) const
     return object_storage->exists(object);
 }
 
-std::unique_ptr<ReadBufferFromFileBase> CachedObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t> read_hint,
-    std::optional<size_t> file_size) const
-{
-    return object_storage->readObjects(objects, patchSettings(read_settings), read_hint, file_size);
-}
-
 std::unique_ptr<ReadBufferFromFileBase> CachedObjectStorage::readObject( /// NOLINT
     const StoredObject & object,
     const ReadSettings & read_settings,
    std::optional<size_t> read_hint,
     std::optional<size_t> file_size) const
 {
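+    /// With the filesystem cache enabled, wrap the nested object storage buffer into a buffer that reads through the on-disk cache.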
+    if (read_settings.enable_filesystem_cache)
+    {
+        if (cache->isInitialized())
+        {
+            auto cache_key = cache->createKeyForPath(object.remote_path);
+            auto global_context = Context::getGlobalContextInstance();
+            auto modified_read_settings = read_settings.withNestedBuffer();
+
+            auto read_buffer_creator = [=, this]()
+            {
+                return object_storage->readObject(object, patchSettings(read_settings), read_hint, file_size);
+            };
+
+            return std::make_unique<CachedOnDiskReadBufferFromFile>(
+                object.remote_path,
+                cache_key,
+                cache,
+                FileCache::getCommonUser(),
+                read_buffer_creator,
+                modified_read_settings,
+                std::string(CurrentThread::getQueryId()),
+                object.bytes_size,
+                /* allow_seeks */!read_settings.remote_read_buffer_restrict_seek,
+                /* use_external_buffer */read_settings.remote_read_buffer_use_external_buffer,
+                /* read_until_position */std::nullopt,
+                global_context->getFilesystemCacheLog());
+        }
+        else
+        {
+            cache->throwInitExceptionIfNeeded();
+        }
+    }
+
     return object_storage->readObject(object, patchSettings(read_settings), read_hint, file_size);
 }
diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h
index 03b013c2eed..b77baf21e40 100644
--- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h
+++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h
@@ -37,12 +37,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp
index 07e2edac129..474851df7d5 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp
@@ -11,6 +11,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -496,16 +499,60 @@ std::unique_ptr<ReadBufferFromFileBase> DiskObjectStorage::readFile(
     std::optional<size_t> file_size) const
 {
     const auto storage_objects = metadata_storage->getStorageObjects(path);
+    auto global_context = Context::getGlobalContextInstance();
 
     const bool file_can_be_empty = !file_size.has_value() || *file_size == 0;
     if (storage_objects.empty() && file_can_be_empty)
         return std::make_unique<ReadBufferFromEmptyFile>();
 
-    return object_storage->readObjects(
+    auto read_settings = updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName());
+    /// We wrap read buffer from object storage (read_buf = object_storage->readObject())
+    /// inside ReadBufferFromRemoteFSGather, so add nested buffer setting.
+    read_settings = read_settings.withNestedBuffer();
+
+    auto read_buffer_creator =
+        [this, read_settings, read_hint, file_size]
+        (bool restricted_seek, const StoredObject & object_) mutable -> std::unique_ptr<ReadBufferFromFileBase>
+    {
+        read_settings.remote_read_buffer_restrict_seek = restricted_seek;
+        auto impl = object_storage->readObject(object_, read_settings, read_hint, file_size);
+
+        if ((!object_storage->supportsCache() || !read_settings.enable_filesystem_cache)
+            && read_settings.page_cache && read_settings.use_page_cache_for_disks_without_file_cache)
+        {
+            /// Can't wrap CachedOnDiskReadBufferFromFile in CachedInMemoryReadBufferFromFile because the
+            /// former doesn't support seeks.
+            auto cache_path_prefix = fmt::format("{}:", magic_enum::enum_name(object_storage->getType()));
+            const auto object_namespace = object_storage->getObjectsNamespace();
+            if (!object_namespace.empty())
+                cache_path_prefix += object_namespace + "/";
+
+            const auto cache_key = FileChunkAddress { .path = cache_path_prefix + object_.remote_path };
+
+            impl = std::make_unique<CachedInMemoryReadBufferFromFile>(
+                cache_key, read_settings.page_cache, std::move(impl), read_settings);
+        }
+        return impl;
+    };
+
+    const bool use_async_buffer = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool;
+    auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
+        std::move(read_buffer_creator),
         storage_objects,
-        updateIOSchedulingSettings(settings, getReadResourceName(), getWriteResourceName()),
-        read_hint,
-        file_size);
+        read_settings,
+        global_context->getFilesystemCacheLog(),
+        /* use_external_buffer */use_async_buffer);
+
+    if (use_async_buffer)
+    {
+        auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
+        return std::make_unique<AsynchronousBoundedReadBuffer>(
+            std::move(impl), reader, read_settings,
+            global_context->getAsyncReadCounters(),
+            global_context->getFilesystemReadPrefetchesLog());
+
+    }
+    return impl;
 }
 
 std::unique_ptr<WriteBufferFromFileBase> DiskObjectStorage::writeFile(
diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp
index 512cc34ef44..182534529ea 100644
--- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp
@@ -82,28 +82,12 @@ std::unique_ptr<ReadBufferFromFileBase> HDFSObjectStorage::readObject( /// NOLIN
     initializeHDFSFS();
     auto path = extractObjectKeyFromURL(object);
     return std::make_unique<ReadBufferFromHDFS>(
-        fs::path(url_without_path) / "", fs::path(data_directory) / path, config, patchSettings(read_settings));
-}
-
-std::unique_ptr<ReadBufferFromFileBase> HDFSObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t>,
-    std::optional<size_t>) const
-{
-    initializeHDFSFS();
-    auto disk_read_settings = patchSettings(read_settings);
-    auto read_buffer_creator =
-        [this, disk_read_settings]
-        (bool /* restricted_seek */, const StoredObject & object_) -> std::unique_ptr<ReadBufferFromFileBase>
-    {
-        auto path = extractObjectKeyFromURL(object_);
-        return std::make_unique<ReadBufferFromHDFS>(
-            fs::path(url_without_path) / "", fs::path(data_directory) / path, config, disk_read_settings, /* read_until_position */0, /* use_external_buffer */true);
-    };
-
-    return std::make_unique<ReadBufferFromRemoteFSGather>(
-        std::move(read_buffer_creator), objects, "hdfs:", disk_read_settings, nullptr, /* use_external_buffer */false);
+        fs::path(url_without_path) / "",
+        fs::path(data_directory) / path,
+        config,
+        patchSettings(read_settings),
+        /* read_until_position */0,
+        read_settings.remote_read_buffer_use_external_buffer);
 }
 
 std::unique_ptr<WriteBufferFromFileBase> HDFSObjectStorage::writeObject( /// NOLINT
diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h
index 21c2b0635ca..b53161beb76 100644
--- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h
+++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h
@@ -69,12 +69,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h
index 72f6d150d34..8dde96b8b16 100644
--- a/src/Disks/ObjectStorages/IObjectStorage.h
+++ b/src/Disks/ObjectStorages/IObjectStorage.h
@@ -150,13 +150,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const = 0;
 
-    /// Read multiple objects with common prefix
-    virtual std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const = 0;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     virtual std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp
index 3b650adb71f..5f1b6aedc72 100644
--- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp
@@ -40,47 +40,12 @@ bool LocalObjectStorage::exists(const StoredObject & object) const
     return fs::exists(object.remote_path);
 }
 
-std::unique_ptr<ReadBufferFromFileBase> LocalObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t>,
-    std::optional<size_t>) const
-{
-    auto modified_settings = patchSettings(read_settings);
-    auto global_context = Context::getGlobalContextInstance();
-    auto read_buffer_creator = [=](bool /* restricted_seek */, const StoredObject & object) -> std::unique_ptr<ReadBufferFromFileBase>
-    { return std::make_unique<ReadBufferFromFile>(object.remote_path); };
-
-    return std::make_unique<ReadBufferFromRemoteFSGather>(
-        std::move(read_buffer_creator),
-        objects,
-        "file:",
-        modified_settings,
-        global_context->getFilesystemCacheLog(),
-        /* use_external_buffer */ false);
-}
-
 ReadSettings LocalObjectStorage::patchSettings(const ReadSettings & read_settings) const
 {
-    if (!read_settings.enable_filesystem_cache)
-        return IObjectStorage::patchSettings(read_settings);
-
     auto modified_settings{read_settings};
-    /// For now we cannot allow asynchronous reader from local filesystem when CachedObjectStorage is used.
-    switch (modified_settings.local_fs_method)
-    {
-        case LocalFSReadMethod::pread_threadpool:
-        case LocalFSReadMethod::pread_fake_async:
-        {
-            modified_settings.local_fs_method = LocalFSReadMethod::pread;
-            LOG_INFO(log, "Changing local filesystem read method to `pread`");
-            break;
-        }
-        default:
-        {
-            break;
-        }
-    }
+    /// Other options might break assertions in AsynchronousBoundedReadBuffer.
+    modified_settings.local_fs_method = LocalFSReadMethod::pread;
+    modified_settings.direct_io_threshold = 0; /// Disable.
     return IObjectStorage::patchSettings(modified_settings);
 }
diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h
index 155359ce663..f1a0391a984 100644
--- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h
+++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h
@@ -34,12 +34,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
index 0df498e1a70..0a7f659ee7b 100644
--- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
@@ -176,65 +176,6 @@ bool S3ObjectStorage::exists(const StoredObject & object) const
     return S3::objectExists(*client.get(), uri.bucket, object.remote_path, {});
 }
 
-std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t>,
-    std::optional<size_t>) const
-{
-    ReadSettings disk_read_settings = patchSettings(read_settings);
-    auto global_context = Context::getGlobalContextInstance();
-
-    auto settings_ptr = s3_settings.get();
-
-    auto read_buffer_creator =
-        [this, settings_ptr, disk_read_settings]
-        (bool restricted_seek, const StoredObject & object_) -> std::unique_ptr<ReadBufferFromFileBase>
-    {
-        return std::make_unique<ReadBufferFromS3>(
-            client.get(),
-            uri.bucket,
-            object_.remote_path,
-            uri.version_id,
-            settings_ptr->request_settings,
-            disk_read_settings,
-            /* use_external_buffer */true,
-            /* offset */0,
-            /* read_until_position */0,
-            restricted_seek);
-    };
-
-    switch (read_settings.remote_fs_method)
-    {
-        case RemoteFSReadMethod::read:
-        {
-            return std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                objects,
-                "s3:" + uri.bucket + "/",
-                disk_read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */false);
-        }
-        case RemoteFSReadMethod::threadpool:
-        {
-            auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                objects,
-                "s3:" + uri.bucket + "/",
-                disk_read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */true);
-
-            auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
-            return std::make_unique<AsynchronousBoundedReadBuffer>(
-                std::move(impl), reader, disk_read_settings,
-                global_context->getAsyncReadCounters(),
-                global_context->getFilesystemReadPrefetchesLog());
-        }
-    }
-}
-
 std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObject( /// NOLINT
     const StoredObject & object,
     const ReadSettings & read_settings,
@@ -248,7 +189,12 @@ std::unique_ptr<ReadBufferFromFileBase> S3ObjectStorage::readObject( /// NOLINT
         object.remote_path,
         uri.version_id,
         settings_ptr->request_settings,
-        patchSettings(read_settings));
+        patchSettings(read_settings),
+        read_settings.remote_read_buffer_use_external_buffer,
+        /* offset */0,
+        /* read_until_position */0,
+        read_settings.remote_read_buffer_restrict_seek,
+        object.bytes_size ? std::optional<size_t>(object.bytes_size) : std::nullopt);
 }
 
 std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLINT
diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h
index b99867d8663..ef9da8a948e 100644
--- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h
+++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h
@@ -89,12 +89,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
index 7f7a3fe1a62..61ea584c4ad 100644
--- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
@@ -233,69 +233,18 @@ WebObjectStorage::FileDataPtr WebObjectStorage::tryGetFileInfo(const String & pa
     }
 }
 
-std::unique_ptr<ReadBufferFromFileBase> WebObjectStorage::readObjects( /// NOLINT
-    const StoredObjects & objects,
-    const ReadSettings & read_settings,
-    std::optional<size_t> read_hint,
-    std::optional<size_t> file_size) const
-{
-    if (objects.size() != 1)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "WebObjectStorage support read only from single object");
-
-    return readObject(objects[0], read_settings, read_hint, file_size);
-
-}
-
 std::unique_ptr<ReadBufferFromFileBase> WebObjectStorage::readObject( /// NOLINT
     const StoredObject & object,
     const ReadSettings & read_settings,
     std::optional<size_t>,
     std::optional<size_t>) const
 {
-    size_t object_size = object.bytes_size;
-    auto read_buffer_creator =
-        [this, read_settings, object_size]
-        (bool /* restricted_seek */, const StoredObject & object_) -> std::unique_ptr<ReadBufferFromFileBase>
-    {
-        return std::make_unique<ReadBufferFromWebServer>(
-            fs::path(url) / object_.remote_path,
-            getContext(),
-            object_size,
-            read_settings,
-            /* use_external_buffer */true);
-    };
-
-    auto global_context = Context::getGlobalContextInstance();
-
-    switch (read_settings.remote_fs_method)
-    {
-        case RemoteFSReadMethod::read:
-        {
-            return std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                StoredObjects{object},
-                "url:" + url + "/",
-                read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */false);
-        }
-        case RemoteFSReadMethod::threadpool:
-        {
-            auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
-                std::move(read_buffer_creator),
-                StoredObjects{object},
-                "url:" + url + "/",
-                read_settings,
-                global_context->getFilesystemCacheLog(),
-                /* use_external_buffer */true);
-
-            auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
-            return std::make_unique<AsynchronousBoundedReadBuffer>(
-                std::move(impl), reader, read_settings,
-                global_context->getAsyncReadCounters(),
-                global_context->getFilesystemReadPrefetchesLog());
-        }
-    }
+    return std::make_unique<ReadBufferFromWebServer>(
+        fs::path(url) / object.remote_path,
+        getContext(),
+        object.bytes_size,
+        read_settings,
+        read_settings.remote_read_buffer_use_external_buffer);
 }
 
 void WebObjectStorage::throwNotAllowed()
diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h
index 9f7f7c137e2..9b94ae01021 100644
--- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h
+++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h
@@ -39,12 +39,6 @@ public:
         std::optional<size_t> read_hint = {},
         std::optional<size_t> file_size = {}) const override;
 
-    std::unique_ptr<ReadBufferFromFileBase> readObjects( /// NOLINT
-        const StoredObjects & objects,
-        const ReadSettings & read_settings,
-        std::optional<size_t> read_hint = {},
-        std::optional<size_t> file_size = {}) const override;
-
     /// Open the file for write and return WriteBufferFromFileBase object.
     std::unique_ptr<WriteBufferFromFileBase> writeObject( /// NOLINT
         const StoredObject & object,
diff --git a/src/Functions/array/arrayAggregation.cpp b/src/Functions/array/arrayAggregation.cpp
index adb1bb707d8..bb2503886f1 100644
--- a/src/Functions/array/arrayAggregation.cpp
+++ b/src/Functions/array/arrayAggregation.cpp
@@ -104,7 +104,7 @@ struct ArrayAggregateImpl
 
     static DataTypePtr getReturnType(const DataTypePtr & expression_return, const DataTypePtr & /*array_element*/)
     {
-        if (aggregate_operation == AggregateOperation::max || aggregate_operation == AggregateOperation::min)
+        if constexpr (aggregate_operation == AggregateOperation::max || aggregate_operation == AggregateOperation::min)
         {
             return expression_return;
         }
@@ -152,9 +152,62 @@ struct ArrayAggregateImpl
         return result;
     }
 
+    template <AggregateOperation op = aggregate_operation>
+    requires(op == AggregateOperation::min || op == AggregateOperation::max)
+    static void executeMinOrMax(const ColumnPtr & mapped, const ColumnArray::Offsets & offsets, ColumnPtr & res_ptr)
+    {
+        const ColumnConst * const_column = checkAndGetColumn<ColumnConst>(&*mapped);
+        if (const_column)
+        {
+            MutableColumnPtr res_column = const_column->getDataColumn().cloneEmpty();
+            res_column->insertMany(const_column->getField(), offsets.size());
+            res_ptr = std::move(res_column);
+            return;
+        }
+
+        MutableColumnPtr res_column = mapped->cloneEmpty();
+        static constexpr int nan_null_direction_hint = aggregate_operation == AggregateOperation::min ? 1 : -1;
+
+        /// TODO: Introduce row_begin and row_end to getPermutation or an equivalent function to use that instead
+        /// (same use case as SingleValueDataBase::getSmallestIndex)
+        UInt64 start_of_array = 0;
+        for (auto end_of_array : offsets)
+        {
+            /// Array is empty
+            if (start_of_array == end_of_array)
+            {
+                res_column->insertDefault();
+                continue;
+            }
+
+            UInt64 index = start_of_array;
+            for (UInt64 i = index + 1; i < end_of_array; i++)
+            {
+                if constexpr (aggregate_operation == AggregateOperation::min)
+                {
+                    if ((mapped->compareAt(i, index, *mapped, nan_null_direction_hint) < 0))
+                        index = i;
+                }
+                else
+                {
+                    if ((mapped->compareAt(i, index, *mapped, nan_null_direction_hint) > 0))
+                        index = i;
+                }
+            }
+
+            res_column->insertFrom(*mapped, index);
+            start_of_array = end_of_array;
+        }
+
+        chassert(res_column->size() == offsets.size());
+        res_ptr = std::move(res_column);
+    }
+
     template <typename Element>
     static NO_SANITIZE_UNDEFINED bool executeType(const ColumnPtr & mapped, const ColumnArray::Offsets & offsets, ColumnPtr & res_ptr)
     {
+        /// Min and Max are implemented in a different function
+        static_assert(aggregate_operation != AggregateOperation::min && aggregate_operation != AggregateOperation::max);
         using ResultType = ArrayAggregateResult<Element, aggregate_operation>;
         using ColVecType = ColumnVectorOrDecimal<Element>;
         using ColVecResultType = ColumnVectorOrDecimal<ResultType>;
@@ -197,11 +250,6 @@ struct ArrayAggregateImpl
                     /// Just multiply the value by array size.
                     res[i] = x * static_cast<ResultType>(array_size);
                 }
-                else if constexpr (aggregate_operation == AggregateOperation::min ||
-                                aggregate_operation == AggregateOperation::max)
-                {
-                    res[i] = x;
-                }
                 else if constexpr (aggregate_operation == AggregateOperation::average)
                 {
                     if constexpr (is_decimal<Element>)
@@ -292,20 +340,6 @@ struct ArrayAggregateImpl
                 {
                     aggregate_value += element;
                 }
-                else if constexpr (aggregate_operation == AggregateOperation::min)
-                {
-                    if (element < aggregate_value)
-                    {
-                        aggregate_value = element;
-                    }
-                }
-                else if constexpr (aggregate_operation == AggregateOperation::max)
-                {
-                    if (element > aggregate_value)
-                    {
-                        aggregate_value = element;
-                    }
-                }
                 else if constexpr (aggregate_operation == AggregateOperation::product)
                 {
                     if constexpr (is_decimal<Element>)
@@ -360,74 +394,41 @@ struct ArrayAggregateImpl
 
     static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped)
     {
-        if constexpr (aggregate_operation == AggregateOperation::max || aggregate_operation == AggregateOperation::min)
-        {
-            MutableColumnPtr res;
-            const auto & column = array.getDataPtr();
-            const ColumnConst * const_column = checkAndGetColumn<ColumnConst>(&*column);
-            if (const_column)
-            {
-                res = const_column->getDataColumn().cloneEmpty();
-            }
-            else
-            {
-                res = column->cloneEmpty();
-            }
-            const IColumn::Offsets & offsets = array.getOffsets();
-            size_t pos = 0;
-            for (const auto & offset : offsets)
-            {
-                if (offset == pos)
-                {
-                    res->insertDefault();
-                    continue;
-                }
-                size_t current_max_or_min_index = pos;
-                ++pos;
-                for (; pos < offset; ++pos)
-                {
-                    int compare_result = column->compareAt(pos, current_max_or_min_index, *column, 1);
-                    if (aggregate_operation == AggregateOperation::max && compare_result > 0)
-                    {
-                        current_max_or_min_index = pos;
-                    }
-                    else if (aggregate_operation == AggregateOperation::min && compare_result < 0)
-                    {
-                        current_max_or_min_index = pos;
-                    }
-                }
-                res->insert((*column)[current_max_or_min_index]);
-            }
-            return res;
-        }
-
         const IColumn::Offsets & offsets = array.getOffsets();
         ColumnPtr res;
 
-        if (executeType<UInt8>(mapped, offsets, res) ||
-            executeType<UInt16>(mapped, offsets, res) ||
-            executeType<UInt32>(mapped, offsets, res) ||
-            executeType<UInt64>(mapped, offsets, res) ||
-            executeType<UInt128>(mapped, offsets, res) ||
-            executeType<UInt256>(mapped, offsets, res) ||
-            executeType<Int8>(mapped, offsets, res) ||
-            executeType<Int16>(mapped, offsets, res) ||
-            executeType<Int32>(mapped, offsets, res) ||
-            executeType<Int64>(mapped, offsets, res) ||
-            executeType<Int128>(mapped, offsets, res) ||
-            executeType<Int256>(mapped, offsets, res) ||
-            executeType<Float32>(mapped, offsets, res) ||
-            executeType<Float64>(mapped, offsets, res) ||
-            executeType<Decimal32>(mapped, offsets, res) ||
-            executeType<Decimal64>(mapped, offsets, res) ||
-            executeType<Decimal128>(mapped, offsets, res) ||
-            executeType<Decimal256>(mapped, offsets, res) ||
-            executeType<DateTime64>(mapped, offsets, res))
+        if constexpr (aggregate_operation == AggregateOperation::min || aggregate_operation == AggregateOperation::max)
         {
+            executeMinOrMax(mapped, offsets, res);
             return res;
         }
         else
-            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected column for arraySum: {}", mapped->getName());
+        {
+            if (executeType<UInt8>(mapped, offsets, res) ||
+                executeType<UInt16>(mapped, offsets, res) ||
+                executeType<UInt32>(mapped, offsets, res) ||
+                executeType<UInt64>(mapped, offsets, res) ||
+                executeType<UInt128>(mapped, offsets, res) ||
+                executeType<UInt256>(mapped, offsets, res) ||
+                executeType<Int8>(mapped, offsets, res) ||
+                executeType<Int16>(mapped, offsets, res) ||
+                executeType<Int32>(mapped, offsets, res) ||
+                executeType<Int64>(mapped, offsets, res) ||
+                executeType<Int128>(mapped, offsets, res) ||
+                executeType<Int256>(mapped, offsets, res) ||
+                executeType<Float32>(mapped, offsets, res) ||
+                executeType<Float64>(mapped, offsets, res) ||
+                executeType<Decimal32>(mapped, offsets, res) ||
+                executeType<Decimal64>(mapped, offsets, res) ||
+                executeType<Decimal128>(mapped, offsets, res) ||
+                executeType<Decimal256>(mapped, offsets, res) ||
+                executeType<DateTime64>(mapped, offsets, res))
+            {
+                return res;
+            }
+        }
+
+        throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Unexpected column for arraySum: {}", mapped->getName());
     }
 };
diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h
index fb92fd72572..8a42360fd30 100644
--- a/src/IO/ReadSettings.h
+++ b/src/IO/ReadSettings.h
@@ -116,7 +116,8 @@ struct ReadSettings
 
     size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE;
 
-    FileCachePtr remote_fs_cache;
+    bool remote_read_buffer_restrict_seek = false;
+    bool remote_read_buffer_use_external_buffer = false;
 
     /// Bandwidth throttler to use during reading
     ThrottlerPtr remote_throttler;
@@ -138,6 +139,14 @@ struct ReadSettings
         res.prefetch_buffer_size = std::min(std::max(1ul, file_size), prefetch_buffer_size);
         return res;
     }
+
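+    /// Settings for a buffer that will be nested inside another (e.g. caching or gather) buffer: the outer buffer then controls seeks and owns the working memory.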
+ if (const auto * merge_tree_data = dynamic_cast<const MergeTreeData *>(as_storage.get())) + { + if (merge_tree_data->format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) + { + if (!create.storage->primary_key && as_storage_metadata->isPrimaryKeyDefined() && as_storage_metadata->hasPrimaryKey()) + create.storage->set(create.storage->primary_key, as_storage_metadata->getPrimaryKeyAST()->clone()); + + if (!create.storage->partition_by && as_storage_metadata->isPartitionKeyDefined() && as_storage_metadata->hasPartitionKey()) + create.storage->set(create.storage->partition_by, as_storage_metadata->getPartitionKeyAST()->clone()); + + if (!create.storage->order_by && as_storage_metadata->isSortingKeyDefined() && as_storage_metadata->hasSortingKey()) + create.storage->set(create.storage->order_by, as_storage_metadata->getSortingKeyAST()->clone()); + + if (!create.storage->sample_by && as_storage_metadata->isSamplingKeyDefined() && as_storage_metadata->hasSamplingKey()) + create.storage->set(create.storage->sample_by, as_storage_metadata->getSamplingKeyAST()->clone()); + } + } } else { diff --git a/src/Storages/MergeTree/GinIndexStore.cpp b/src/Storages/MergeTree/GinIndexStore.cpp index 6e0273701ad..e92460ff498 100644 --- a/src/Storages/MergeTree/GinIndexStore.cpp +++ b/src/Storages/MergeTree/GinIndexStore.cpp @@ -24,6 +24,7 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int UNKNOWN_FORMAT_VERSION; + extern const int NOT_IMPLEMENTED; }; GinIndexPostingsBuilder::GinIndexPostingsBuilder(UInt64 limit) @@ -153,13 +154,18 @@ GinIndexStore::GinIndexStore(const String & name_, DataPartStoragePtr storage_) : name(name_) , storage(storage_) { + if (storage->getType() != MergeTreeDataPartStorageType::Full) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "INDEX {} with 'full_text' type supports only full storage", name); } + GinIndexStore::GinIndexStore(const String & name_, DataPartStoragePtr storage_, MutableDataPartStoragePtr data_part_storage_builder_, UInt64 max_digestion_size_) : name(name_) , storage(storage_) , data_part_storage_builder(data_part_storage_builder_) , max_digestion_size(max_digestion_size_) { + if (storage->getType() != MergeTreeDataPartStorageType::Full) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "INDEX {} with 'full_text' type supports only full storage", name); } bool GinIndexStore::exists() const diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index f0447e71539..fc785d6be17 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -648,7 +648,8 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::prepareProjectionsToMergeAndRe for (const auto & projection : projections) { - if (merge_may_reduce_rows) + /// Checking IGNORE here is just for compatibility.
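The `if` that this comment introduces follows below; the interplay between the `deduplicate_merge_projection_mode` values is easiest to see spelled out. A conceptual sketch, not ClickHouse source, with mode names mirroring the `DeduplicateMergeProjectionMode` enum:

```python
def handle_projections_on_row_reducing_merge(mode: str) -> str:
    # A merge that may reduce rows (e.g. deduplication) leaves projections
    # out of sync with the main data, so each mode resolves that:
    #   throw   - refuse the operation up front
    #   drop    - drop the affected projections
    #   rebuild - rebuild them from the merged data
    #   ignore  - keep stale projections (compatibility only; queries
    #             answered from them may be incorrect)
    if mode == "throw":
        raise RuntimeError("refuse merges that would invalidate projections")
    if mode == "ignore":
        return "keep projections as-is"
    return f"{mode} the affected projections"
```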
+ if (merge_may_reduce_rows && mode != DeduplicateMergeProjectionMode::IGNORE) { global_ctx->projections_to_rebuild.push_back(&projection); continue; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index cff381a3429..6b70d1d9245 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -2129,6 +2129,8 @@ try runner([&, my_part = part]() { + auto blocker_for_runner_thread = CannotAllocateThreadFaultInjector::blockFaultInjections(); + auto res = loadDataPartWithRetries( my_part->info, my_part->name, my_part->disk, DataPartState::Outdated, data_parts_mutex, loading_parts_initial_backoff_ms, diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 62ce2c40f53..2b214aec434 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -221,7 +221,7 @@ struct Settings; /** Projection settings. */ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ - M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree, if allowed, what is the action when merge, drop or rebuild.", 0) \ + M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow creating projections for tables with a non-classic MergeTree engine and, if allowed, what to do with them on merge: throw, drop, or rebuild. The 'ignore' option exists purely for compatibility and may produce incorrect answers.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ M(TYPE, NAME, DEFAULT, "Obsolete setting, does nothing.", BaseSettingsHelpers::Flags::OBSOLETE) diff --git a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp index 149ad6b4a10..6eac71eeaf7 100644 --- a/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp +++ b/src/Storages/MergeTree/PartMetadataManagerOrdinary.cpp @@ -11,7 +11,11 @@ namespace DB std::unique_ptr<ReadBuffer> PartMetadataManagerOrdinary::read(const String & file_name) const { size_t file_size = part->getDataPartStorage().getFileSize(file_name); - auto res = part->getDataPartStorage().readFile(file_name, getReadSettings().adjustBufferSize(file_size), file_size, std::nullopt); + auto read_settings = getReadSettings().adjustBufferSize(file_size); + /// Default read method is pread_threadpool, but there is not much point in it here.
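Before the hunk resumes with the `pread` override, it is worth making the `adjustBufferSize` clamping explicit, since it is what makes a plain synchronous read sufficient here. A faithful Python mirror of the one-liner shown in ReadSettings above (`std::min(std::max(1ul, file_size), buffer_size)`):

```python
def adjust_buffer_size(file_size: int, buffer_size: int = 1 << 20) -> int:
    # Never allocate a bigger buffer than the file itself, but always at
    # least one byte; tiny metadata files get tiny buffers, so dispatching
    # the read to a thread pool would be pure overhead.
    return min(max(1, file_size), buffer_size)

assert adjust_buffer_size(0) == 1
assert adjust_buffer_size(100) == 100
assert adjust_buffer_size(10 << 20) == 1 << 20
```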
+ read_settings.local_fs_method = LocalFSReadMethod::pread; + + auto res = part->getDataPartStorage().readFile(file_name, read_settings, file_size, std::nullopt); if (isCompressedFromFileName(file_name)) return std::make_unique(std::move(res)); diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 975097b5fda..26f4dec97e8 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -77,7 +77,14 @@ bool isRetryableException(std::exception_ptr exception_ptr) #endif catch (const ErrnoException & e) { - return e.getErrno() == EMFILE; + return e.getErrno() == EMFILE + || e.getErrno() == ENOMEM + || isNotEnoughMemoryErrorCode(e.code()) + || e.code() == ErrorCodes::NETWORK_ERROR + || e.code() == ErrorCodes::SOCKET_TIMEOUT + || e.code() == ErrorCodes::CANNOT_SCHEDULE_TASK + || e.code() == ErrorCodes::ABORTED; + } catch (const Coordination::Exception & e) { @@ -91,6 +98,22 @@ bool isRetryableException(std::exception_ptr exception_ptr) || e.code() == ErrorCodes::CANNOT_SCHEDULE_TASK || e.code() == ErrorCodes::ABORTED; } + catch (const std::filesystem::filesystem_error & e) + { + return e.code() == std::errc::no_space_on_device || + e.code() == std::errc::read_only_file_system || + e.code() == std::errc::too_many_files_open_in_system || + e.code() == std::errc::operation_not_permitted || + e.code() == std::errc::device_or_resource_busy || + e.code() == std::errc::permission_denied || + e.code() == std::errc::too_many_files_open || + e.code() == std::errc::text_file_busy || + e.code() == std::errc::timed_out || + e.code() == std::errc::not_enough_memory || + e.code() == std::errc::not_supported || + e.code() == std::errc::too_many_links || + e.code() == std::errc::too_many_symbolic_link_levels; + } catch (const Poco::Net::NetException &) { return true; @@ -171,13 +194,9 @@ static IMergeTreeDataPart::Checksums checkDataPart( SerializationInfo::Settings settings{ratio_of_defaults, false}; serialization_infos = SerializationInfoByName::readJSON(columns_txt, settings, *serialization_file); } - catch (const Poco::Exception & ex) - { - throw Exception(ErrorCodes::CORRUPTED_DATA, "Failed to load {}, with error {}", IMergeTreeDataPart::SERIALIZATION_FILE_NAME, ex.message()); - } catch (...) { - throw; + throw Exception(ErrorCodes::CORRUPTED_DATA, "Failed to load file {} of data part {}, with error {}", IMergeTreeDataPart::SERIALIZATION_FILE_NAME, data_part->name, getCurrentExceptionMessage(true)); } } @@ -399,18 +418,45 @@ IMergeTreeDataPart::Checksums checkDataPart( ReadSettings read_settings; read_settings.enable_filesystem_cache = false; + read_settings.enable_filesystem_cache_log = false; + read_settings.enable_filesystem_read_prefetches_log = false; + read_settings.page_cache = nullptr; + read_settings.load_marks_asynchronously = false; + read_settings.remote_fs_prefetch = false; + read_settings.page_cache_inject_eviction = false; + read_settings.use_page_cache_for_disks_without_file_cache = false; + read_settings.local_fs_method = LocalFSReadMethod::pread; + + try + { + return checkDataPart( + data_part, + data_part_storage, + data_part->getColumns(), + data_part->getType(), + data_part->getFileNamesWithoutChecksums(), + read_settings, + require_checksums, + is_cancelled, + is_broken_projection, + throw_on_broken_projection); + } + catch (...) 
+ { + if (isRetryableException(std::current_exception())) + { + LOG_DEBUG( + getLogger("checkDataPart"), + "Got retryable error {} checking data part {}, will return empty", getCurrentExceptionMessage(false), data_part->name); + + /// We were unable to check data part because of some temporary exception + /// like Memory limit exceeded. If part is actually broken we will retry check + /// with the next read attempt of this data part. + return IMergeTreeDataPart::Checksums{}; + } + throw; + } - return checkDataPart( - data_part, - data_part_storage, - data_part->getColumns(), - data_part->getType(), - data_part->getFileNamesWithoutChecksums(), - read_settings, - require_checksums, - is_cancelled, - is_broken_projection, - throw_on_broken_projection); }; try @@ -431,7 +477,16 @@ IMergeTreeDataPart::Checksums checkDataPart( catch (...) { if (isRetryableException(std::current_exception())) - throw; + { + LOG_DEBUG( + getLogger("checkDataPart"), + "Got retryable error {} checking data part {}, will return empty", getCurrentExceptionMessage(false), data_part->name); + + /// We were unable to check data part because of some temporary exception + /// like Memory limit exceeded. If part is actually broken we will retry check + /// with the next read attempt of this data part. + return {}; + } return drop_cache_and_check(); } } diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 0b7106de949..440a4af4b64 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -426,37 +427,39 @@ std::unique_ptr StorageObjectStorageSource::createReadBuffer( const auto & object_size = object_info.metadata->size_bytes; auto read_settings = context_->getReadSettings().adjustBufferSize(object_size); - read_settings.enable_filesystem_cache = false; /// FIXME: Changing this setting to default value breaks something around parquet reading read_settings.remote_read_min_bytes_for_seek = read_settings.remote_fs_buffer_size; + /// User's object may change, don't cache it. + read_settings.enable_filesystem_cache = false; + read_settings.use_page_cache_for_disks_without_file_cache = false; const bool object_too_small = object_size <= 2 * context_->getSettingsRef()[Setting::max_download_buffer_size]; - const bool use_prefetch = object_too_small && read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; - read_settings.remote_fs_method = use_prefetch ? RemoteFSReadMethod::threadpool : RemoteFSReadMethod::read; - /// User's object may change, don't cache it. - read_settings.use_page_cache_for_disks_without_file_cache = false; + const bool use_prefetch = object_too_small + && read_settings.remote_fs_method == RemoteFSReadMethod::threadpool + && read_settings.remote_fs_prefetch; + + if (use_prefetch) + read_settings.remote_read_buffer_use_external_buffer = true; + + auto impl = object_storage->readObject(StoredObject(object_info.getPath(), "", object_size), read_settings); // Create a read buffer that will prefetch the first ~1 MB of the file. // When reading lots of tiny files, this prefetching almost doubles the throughput. // For bigger files, parallel reading is more useful.
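The rewritten branching below reduces to one heuristic, which can be stated compactly. A sketch, with parameters named after the settings used in this function:

```python
def should_prefetch(object_size: int,
                    max_download_buffer_size: int,
                    remote_fs_method_is_threadpool: bool,
                    remote_fs_prefetch: bool) -> bool:
    # Prefetching the first ~1 MB pays off only for objects small enough to
    # fit in roughly two download buffers; bigger objects are better served
    # by parallel reading, and prefetch requires the threadpool read method.
    object_too_small = object_size <= 2 * max_download_buffer_size
    return object_too_small and remote_fs_method_is_threadpool and remote_fs_prefetch
```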
- if (use_prefetch) - { - LOG_TRACE(log, "Downloading object of size {} with initial prefetch", object_size); + if (!use_prefetch) + return impl; - auto async_reader = object_storage->readObjects( - StoredObjects{StoredObject{object_info.getPath(), /* local_path */ "", object_size}}, read_settings); + LOG_TRACE(log, "Downloading object of size {} with initial prefetch", object_size); - async_reader->setReadUntilEnd(); - if (read_settings.remote_fs_prefetch) - async_reader->prefetch(DEFAULT_PREFETCH_PRIORITY); + auto & reader = context_->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER); + impl = std::make_unique<AsynchronousBoundedReadBuffer>( + std::move(impl), reader, read_settings, + context_->getAsyncReadCounters(), + context_->getFilesystemReadPrefetchesLog()); - return async_reader; - } - else - { - /// FIXME: this is inconsistent that readObject always reads synchronously ignoring read_method setting. - return object_storage->readObject(StoredObject(object_info.getPath(), "", object_size), read_settings); - } + impl->setReadUntilEnd(); + impl->prefetch(DEFAULT_PREFETCH_PRIORITY); + return impl; } StorageObjectStorageSource::IIterator::IIterator(const std::string & logger_name_) diff --git a/src/Storages/SelectQueryDescription.cpp b/src/Storages/SelectQueryDescription.cpp index 0c06c523515..7129c8c66f0 100644 --- a/src/Storages/SelectQueryDescription.cpp +++ b/src/Storages/SelectQueryDescription.cpp @@ -100,17 +100,20 @@ void checkAllowedQueries(const ASTSelectQuery & query) /// check if only one single select query in SelectWithUnionQuery static bool isSingleSelect(const ASTPtr & select, ASTPtr & res) { - auto new_select = select->as<ASTSelectWithUnionQuery &>(); - if (new_select.list_of_selects->children.size() != 1) + auto * new_select = select->as<ASTSelectWithUnionQuery>(); + if (new_select == nullptr) return false; - auto & new_inner_query = new_select.list_of_selects->children.at(0); + + if (new_select->list_of_selects->children.size() != 1) + return false; + auto & new_inner_query = new_select->list_of_selects->children.at(0); if (new_inner_query->as<ASTSelectQuery>()) { res = new_inner_query; return true; } - else - return isSingleSelect(new_inner_query, res); + + return isSingleSelect(new_inner_query, res); } SelectQueryDescription SelectQueryDescription::getSelectQueryFromASTForMatView(const ASTPtr & select, bool refreshable, ContextPtr context) diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index f4d2ee67bb6..f4c78f72e20 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -1588,8 +1588,9 @@ bool StorageMergeTree::optimize( { assertNotReadonly(); + const auto mode = getSettings()->deduplicate_merge_projection_mode; if (deduplicate && getInMemoryMetadataPtr()->hasProjections() - && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) + && (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::IGNORE)) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections.
" "User should drop all the projections manually before running the query, " diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a3d529c5fbb..85bb4a48ba7 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5833,8 +5833,9 @@ bool StorageReplicatedMergeTree::optimize( if (!is_leader) throw Exception(ErrorCodes::NOT_A_LEADER, "OPTIMIZE cannot be done on this replica because it is not a leader"); + const auto mode = getSettings()->deduplicate_merge_projection_mode; if (deduplicate && getInMemoryMetadataPtr()->hasProjections() - && getSettings()->deduplicate_merge_projection_mode == DeduplicateMergeProjectionMode::THROW) + && (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::IGNORE)) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "OPTIMIZE DEDUPLICATE query is not supported for table {} as it has projections. " "User should drop all the projections manually before running the query, " diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 04f5a1625d1..10431ce038f 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1133,12 +1133,15 @@ def main() -> int: if IS_CI and not pr_info.is_merge_queue: - if pr_info.is_release and pr_info.is_push_event: + if pr_info.is_master and pr_info.is_push_event: print("Release/master: CI Cache add pending records for all todo jobs") ci_cache.push_pending_all(pr_info.is_release) - # wait for pending jobs to be finished, await_jobs is a long blocking call - ci_cache.await_pending_jobs(pr_info.is_release) + if pr_info.is_master or pr_info.is_pr: + # - wait for pending jobs to be finished, await_jobs is a long blocking call + # - don't wait for release CI because some jobs may not be present there + # and we may wait until timeout in vain + ci_cache.await_pending_jobs(pr_info.is_release) # conclude results result["git_ref"] = git_ref diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 678da331837..35f7309fbe2 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -14,10 +14,9 @@ import string import subprocess import sys import time -import zlib # for crc32 from collections import defaultdict from itertools import chain -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from ci_utils import kill_ci_runner from env_helper import IS_CI @@ -44,10 +43,6 @@ TASK_TIMEOUT = 8 * 60 * 60 # 8 hours NO_CHANGES_MSG = "Nothing to run" -def stringhash(s): - return zlib.crc32(s.encode("utf-8")) - - # Search test by the common prefix. # This is accept tests w/o parameters in skip list. 
# @@ -255,7 +250,8 @@ def clear_ip_tables_and_restart_daemons(): class ClickhouseIntegrationTestsRunner: - def __init__(self, result_path, params): + def __init__(self, repo_path: str, result_path: str, params: dict): + self.repo_path = repo_path self.result_path = result_path self.params = params @@ -313,11 +309,11 @@ class ClickhouseIntegrationTestsRunner: def shuffle_test_groups(self): return self.shuffle_groups != 0 - def _pre_pull_images(self, repo_path): - image_cmd = self._get_runner_image_cmd(repo_path) + def _pre_pull_images(self): + image_cmd = self._get_runner_image_cmd() cmd = ( - f"cd {repo_path}/tests/integration && " + f"cd {self.repo_path}/tests/integration && " f"timeout --signal=KILL 1h ./runner {self._get_runner_opts()} {image_cmd} " "--pre-pull --command ' echo Pre Pull finished ' " ) @@ -422,12 +418,12 @@ class ClickhouseIntegrationTestsRunner: return " ".join(result) - def _get_all_tests(self, repo_path): - image_cmd = self._get_runner_image_cmd(repo_path) + def _get_all_tests(self) -> List[str]: + image_cmd = self._get_runner_image_cmd() runner_opts = self._get_runner_opts() out_file_full = os.path.join(self.result_path, "runner_get_all_tests.log") cmd = ( - f"cd {repo_path}/tests/integration && " + f"cd {self.repo_path}/tests/integration && " f"timeout --signal=KILL 1h ./runner {runner_opts} {image_cmd} -- --setup-plan " ) @@ -508,10 +504,10 @@ class ClickhouseIntegrationTestsRunner: for test in current_counters[state]: main_counters[state].append(test) - def _get_runner_image_cmd(self, repo_path): + def _get_runner_image_cmd(self): image_cmd = "" if self._can_run_with( - os.path.join(repo_path, "tests/integration", "runner"), + os.path.join(self.repo_path, "tests/integration", "runner"), "--docker-image-version", ): for img in IMAGES: @@ -523,7 +519,7 @@ class ClickhouseIntegrationTestsRunner: image_cmd += f" --docker-image-version={runner_version} " else: if self._can_run_with( - os.path.join(repo_path, "tests/integration", "runner"), + os.path.join(self.repo_path, "tests/integration", "runner"), "--docker-compose-images-tags", ): image_cmd += ( @@ -564,7 +560,6 @@ class ClickhouseIntegrationTestsRunner: def try_run_test_group( self, - repo_path, test_group, tests_in_group, num_tries, @@ -573,7 +568,6 @@ class ClickhouseIntegrationTestsRunner: ): try: return self.run_test_group( - repo_path, test_group, tests_in_group, num_tries, @@ -596,7 +590,6 @@ class ClickhouseIntegrationTestsRunner: def run_test_group( self, - repo_path, test_group, tests_in_group, num_tries, @@ -620,7 +613,7 @@ class ClickhouseIntegrationTestsRunner: tests_times[test] = 0 return counters, tests_times, [] - image_cmd = self._get_runner_image_cmd(repo_path) + image_cmd = self._get_runner_image_cmd() test_group_str = test_group.replace("/", "_").replace(".", "_") log_paths = [] @@ -639,10 +632,10 @@ class ClickhouseIntegrationTestsRunner: test_names.add(test_name) if i == 0: - test_data_dirs = self._find_test_data_dirs(repo_path, test_names) + test_data_dirs = self._find_test_data_dirs(self.repo_path, test_names) info_basename = test_group_str + "_" + str(i) + ".nfo" - info_path = os.path.join(repo_path, "tests/integration", info_basename) + info_path = os.path.join(self.repo_path, "tests/integration", info_basename) test_cmd = " ".join([shlex.quote(test) for test in sorted(test_names)]) parallel_cmd = f" --parallel {num_workers} " if num_workers > 0 else "" @@ -653,7 +646,7 @@ class ClickhouseIntegrationTestsRunner: # -p -- (p)assed # -s -- (s)kipped cmd = ( - f"cd 
{repo_path}/tests/integration && " + f"cd {self.repo_path}/tests/integration && " f"timeout --signal=KILL 1h ./runner {self._get_runner_opts()} " f"{image_cmd} -t {test_cmd} {parallel_cmd} {repeat_cmd} -- -rfEps --run-id={i} " f"--color=no --durations=0 {_get_deselect_option(self.should_skip_tests())} " @@ -661,7 +654,7 @@ class ClickhouseIntegrationTestsRunner: ) log_basename = test_group_str + "_" + str(i) + ".log" - log_path = os.path.join(repo_path, "tests/integration", log_basename) + log_path = os.path.join(self.repo_path, "tests/integration", log_basename) with open(log_path, "w", encoding="utf-8") as log: logging.info("Executing cmd: %s", cmd) # ignore retcode, since it meaningful due to pipe to tee @@ -678,7 +671,7 @@ class ClickhouseIntegrationTestsRunner: log_paths.append(log_result_path) for pytest_log_path in glob.glob( - os.path.join(repo_path, "tests/integration/pytest*.log") + os.path.join(self.repo_path, "tests/integration/pytest*.log") ): new_name = ( test_group_str @@ -689,11 +682,13 @@ class ClickhouseIntegrationTestsRunner: ) os.rename( pytest_log_path, - os.path.join(repo_path, "tests/integration", new_name), + os.path.join(self.repo_path, "tests/integration", new_name), ) extra_logs_names.append(new_name) - dockerd_log_path = os.path.join(repo_path, "tests/integration/dockerd.log") + dockerd_log_path = os.path.join( + self.repo_path, "tests/integration/dockerd.log" + ) if os.path.exists(dockerd_log_path): new_name = ( test_group_str @@ -704,7 +699,7 @@ class ClickhouseIntegrationTestsRunner: ) os.rename( dockerd_log_path, - os.path.join(repo_path, "tests/integration", new_name), + os.path.join(self.repo_path, "tests/integration", new_name), ) extra_logs_names.append(new_name) @@ -721,7 +716,7 @@ class ClickhouseIntegrationTestsRunner: for test_name, test_time in new_tests_times.items(): tests_times[test_name] = test_time - test_data_dirs_new = self._find_test_data_dirs(repo_path, test_names) + test_data_dirs_new = self._find_test_data_dirs(self.repo_path, test_names) test_data_dirs_diff = self._get_test_data_dirs_difference( test_data_dirs_new, test_data_dirs ) @@ -733,7 +728,7 @@ class ClickhouseIntegrationTestsRunner: "integration_run_" + test_group_str + "_" + str(i) + ".tar.zst", ) self._compress_logs( - os.path.join(repo_path, "tests/integration"), + os.path.join(self.repo_path, "tests/integration"), extra_logs_names + list(test_data_dirs_diff), extras_result_path, ) @@ -773,10 +768,10 @@ class ClickhouseIntegrationTestsRunner: return counters, tests_times, log_paths - def run_flaky_check(self, repo_path, build_path, should_fail=False): + def run_flaky_check(self, build_path, should_fail=False): pr_info = self.params["pr_info"] - tests_to_run = get_changed_tests_to_run(pr_info, repo_path) + tests_to_run = get_changed_tests_to_run(pr_info, self.repo_path) if not tests_to_run: logging.info("No integration tests to run found") return "success", NO_CHANGES_MSG, [(NO_CHANGES_MSG, "OK")], "" @@ -807,7 +802,6 @@ class ClickhouseIntegrationTestsRunner: final_retry += 1 logging.info("Running tests for the %s time", i) group_counters, group_test_times, log_paths = self.try_run_test_group( - repo_path, f"bugfix_{id_counter}" if should_fail else f"flaky{id_counter}", [test_to_run], 1, @@ -873,17 +867,15 @@ class ClickhouseIntegrationTestsRunner: return result_state, status_text, test_result, tests_log_paths - def run_impl(self, repo_path, build_path): + def run_impl(self, build_path): stopwatch = Stopwatch() if self.flaky_check or self.bugfix_validate_check: result_state, 
status_text, test_result, tests_log_paths = ( - self.run_flaky_check( - repo_path, build_path, should_fail=self.bugfix_validate_check - ) + self.run_flaky_check(build_path, should_fail=self.bugfix_validate_check) ) else: result_state, status_text, test_result, tests_log_paths = ( - self.run_normal_check(build_path, repo_path) + self.run_normal_check(build_path) ) if self.soft_deadline_time < time.time(): @@ -906,23 +898,35 @@ class ClickhouseIntegrationTestsRunner: return result_state, status_text, test_result, tests_log_paths - def run_normal_check(self, build_path, repo_path): + def _get_tests_by_hash(self) -> List[str]: + "Tries its best to split the tests equally between groups" + all_tests = self._get_all_tests() + if self.run_by_hash_total == 0: + return all_tests + grouped_tests = self.group_test_by_file(all_tests) + groups_by_hash = { + g: [] for g in range(self.run_by_hash_total) + } # type: Dict[int, List[str]] + for tests_in_group in grouped_tests.values(): + # This is deterministic: first find the smallest group size + min_group = min(len(tests) for tests in groups_by_hash.values()) + # and then pick the group with the smallest index among those + group_to_increase = min( + g for g, t in groups_by_hash.items() if len(t) == min_group + ) + groups_by_hash[group_to_increase].extend(tests_in_group) + return groups_by_hash[self.run_by_hash_num] + + def run_normal_check(self, build_path): self._install_clickhouse(build_path) logging.info("Pulling images") - self._pre_pull_images(repo_path) + self._pre_pull_images() logging.info( "Dump iptables before run %s", subprocess.check_output("sudo iptables -nvL", shell=True), ) - all_tests = self._get_all_tests(repo_path) - if self.run_by_hash_total != 0: - grouped_tests = self.group_test_by_file(all_tests) - all_filtered_by_hash_tests = [] - for group, tests_in_group in grouped_tests.items(): - if stringhash(group) % self.run_by_hash_total == self.run_by_hash_num: - all_filtered_by_hash_tests += tests_in_group - all_tests = all_filtered_by_hash_tests - parallel_skip_tests = self._get_parallel_tests_skip_list(repo_path) + all_tests = self._get_tests_by_hash() + parallel_skip_tests = self._get_parallel_tests_skip_list(self.repo_path) logging.info( "Found %s tests first 3 %s", len(all_tests), " ".join(all_tests[:3]) ) @@ -980,7 +984,7 @@ class ClickhouseIntegrationTestsRunner: break logging.info("Running test group %s containing %s tests", group, len(tests)) group_counters, group_test_times, log_paths = self.try_run_test_group( - repo_path, group, tests, MAX_RETRY, NUM_WORKERS, 0 + group, tests, MAX_RETRY, NUM_WORKERS, 0 ) total_tests = 0 for counter, value in group_counters.items(): @@ -1051,15 +1055,16 @@ def run(): signal.signal(signal.SIGTERM, handle_sigterm) logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") - repo_path = os.environ.get("CLICKHOUSE_TESTS_REPO_PATH") - build_path = os.environ.get("CLICKHOUSE_TESTS_BUILD_PATH") - result_path = os.environ.get("CLICKHOUSE_TESTS_RESULT_PATH") - params_path = os.environ.get("CLICKHOUSE_TESTS_JSON_PARAMS_PATH") + repo_path = os.environ.get("CLICKHOUSE_TESTS_REPO_PATH", "") + build_path = os.environ.get("CLICKHOUSE_TESTS_BUILD_PATH", "") + result_path = os.environ.get("CLICKHOUSE_TESTS_RESULT_PATH", "") + params_path = os.environ.get("CLICKHOUSE_TESTS_JSON_PARAMS_PATH", "") + + assert all((repo_path, build_path, result_path, params_path)) - assert params_path with open(params_path, "r", encoding="utf-8") as jfd: params = json.loads(jfd.read()) - runner =
ClickhouseIntegrationTestsRunner(result_path, params) + runner = ClickhouseIntegrationTestsRunner(repo_path, result_path, params) logging.info("Running tests") @@ -1068,9 +1073,7 @@ def run(): logging.info("Clearing dmesg before run") subprocess.check_call("sudo -E dmesg --clear", shell=True) - state, description, test_results, _test_log_paths = runner.run_impl( - repo_path, build_path - ) + state, description, test_results, _test_log_paths = runner.run_impl(build_path) logging.info("Tests finished") if IS_CI: diff --git a/tests/docker_scripts/stateless_runner.sh b/tests/docker_scripts/stateless_runner.sh index 307b41cf4f1..ba2dee87f6f 100755 --- a/tests/docker_scripts/stateless_runner.sh +++ b/tests/docker_scripts/stateless_runner.sh @@ -378,9 +378,9 @@ done # collect minio audit and server logs # wait for minio to flush its batch if it has any sleep 1 -clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -clickhouse-client ${logs_saver_client_options} -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" -clickhouse-client ${logs_saver_client_options} -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" ||: +clickhouse-client ${logs_saver_client_options} -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" ||: +clickhouse-client ${logs_saver_client_options} -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" ||: # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? 
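The RabbitMQ changes in the following files belong together: the compose file moves to RabbitMQ 4.0 and pre-enables the consistent-hash exchange through an `enabled_plugins` file, which lets the old runtime `rabbitmq-plugins enable` step and its polling loop disappear; readiness becomes one blocking `rabbitmqctl await_startup` with a hard timeout. A condensed sketch of the new check (the real helper goes through the cluster's `docker_exec` wrapper; plain `docker exec` is assumed here):

```python
import subprocess

def await_rabbitmq(container_id: str, cookie: str, timeout: float = 90.0) -> bool:
    cmd = ["docker", "exec", "-e", f"RABBITMQ_ERLANG_COOKIE={cookie}",
           container_id, "rabbitmqctl", "await_startup"]
    try:
        # One blocking call with a hard timeout replaces the old retry loop.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, timeout=timeout)
        return True
    except subprocess.CalledProcessError as e:
        raise RuntimeError(
            f"RabbitMQ startup failed with return code {e.returncode}. "
            f"Output: {e.output.decode(errors='replace')}"
        )
    except subprocess.TimeoutExpired:
        raise RuntimeError("RabbitMQ startup timed out")
```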
diff --git a/tests/integration/compose/docker_compose_rabbitmq.yml b/tests/integration/compose/docker_compose_rabbitmq.yml index 4aae2427596..0e5203b925f 100644 --- a/tests/integration/compose/docker_compose_rabbitmq.yml +++ b/tests/integration/compose/docker_compose_rabbitmq.yml @@ -1,7 +1,9 @@ services: rabbitmq1: - image: rabbitmq:3.12.6-alpine + image: rabbitmq:4.0.2-alpine hostname: rabbitmq1 + environment: + RABBITMQ_FEATURE_FLAGS: feature_flags_v2,message_containers expose: - ${RABBITMQ_PORT:-5672} - ${RABBITMQ_SECURE_PORT:-5671} @@ -14,3 +16,4 @@ services: - /misc/rabbitmq/ca-cert.pem:/etc/rabbitmq/ca-cert.pem - /misc/rabbitmq/server-cert.pem:/etc/rabbitmq/server-cert.pem - /misc/rabbitmq/server-key.pem:/etc/rabbitmq/server-key.pem + - /misc/rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins \ No newline at end of file diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 47ca63db420..d26487e9aa4 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -18,7 +18,7 @@ import traceback import urllib.parse from functools import cache from pathlib import Path -from typing import List, Sequence, Tuple, Union +from typing import Any, List, Sequence, Tuple, Union import requests import urllib3 @@ -228,7 +228,9 @@ def retry_exception(num, delay, func, exception=Exception, *args, **kwargs): raise StopIteration("Function did not finished successfully") -def subprocess_check_call(args, detach=False, nothrow=False): +def subprocess_check_call( + args: Union[Sequence[str], str], detach: bool = False, nothrow: bool = False +) -> str: # Uncomment for debugging # logging.info('run:' + ' '.join(args)) return run_and_check(args, detach=detach, nothrow=nothrow) @@ -296,19 +298,32 @@ def check_postgresql_java_client_is_available(postgresql_java_client_id): return p.returncode == 0 -def check_rabbitmq_is_available(rabbitmq_id, cookie): - p = subprocess.Popen( - docker_exec( - "-e", - f"RABBITMQ_ERLANG_COOKIE={cookie}", - rabbitmq_id, - "rabbitmqctl", - "await_startup", - ), - stdout=subprocess.PIPE, - ) - p.wait(timeout=60) - return p.returncode == 0 +def check_rabbitmq_is_available(rabbitmq_id, cookie, timeout=90): + try: + subprocess.check_output( + docker_exec( + "-e", + f"RABBITMQ_ERLANG_COOKIE={cookie}", + rabbitmq_id, + "rabbitmqctl", + "await_startup", + ), + stderr=subprocess.STDOUT, + timeout=timeout, + ) + return True + except subprocess.CalledProcessError as e: + # Raised if the command returns a non-zero exit code + error_message = ( + f"RabbitMQ startup failed with return code {e.returncode}. " + f"Output: {e.output.decode(errors='replace')}" + ) + raise RuntimeError(error_message) + except subprocess.TimeoutExpired as e: + # Raised if the command times out + raise RuntimeError( + f"RabbitMQ startup timed out. 
Output: {e.output.decode(errors='replace')}" + ) def rabbitmq_debuginfo(rabbitmq_id, cookie): @@ -372,22 +387,6 @@ async def nats_connect_ssl(nats_port, user, password, ssl_ctx=None): return nc -def enable_consistent_hash_plugin(rabbitmq_id, cookie): - p = subprocess.Popen( - docker_exec( - "-e", - f"RABBITMQ_ERLANG_COOKIE={cookie}", - rabbitmq_id, - "rabbitmq-plugins", - "enable", - "rabbitmq_consistent_hash_exchange", - ), - stdout=subprocess.PIPE, - ) - p.communicate() - return p.returncode == 0 - - def get_instances_dir(name): instances_dir_name = "_instances" @@ -2059,8 +2058,14 @@ class ClickHouseCluster: return self.docker_client.api.logs(container_id).decode() def exec_in_container( - self, container_id, cmd, detach=False, nothrow=False, use_cli=True, **kwargs - ): + self, + container_id: str, + cmd: Sequence[str], + detach: bool = False, + nothrow: bool = False, + use_cli: bool = True, + **kwargs: Any, + ) -> str: if use_cli: logging.debug( f"run container_id:{container_id} detach:{detach} nothrow:{nothrow} cmd: {cmd}" @@ -2071,10 +2076,11 @@ class ClickHouseCluster: if "privileged" in kwargs: exec_cmd += ["--privileged"] result = subprocess_check_call( - exec_cmd + [container_id] + cmd, detach=detach, nothrow=nothrow + exec_cmd + [container_id] + list(cmd), detach=detach, nothrow=nothrow ) return result else: + assert self.docker_client is not None exec_id = self.docker_client.api.exec_create(container_id, cmd, **kwargs) output = self.docker_client.api.exec_start(exec_id, detach=detach) @@ -2083,16 +2089,15 @@ class ClickHouseCluster: container_info = self.docker_client.api.inspect_container(container_id) image_id = container_info.get("Image") image_info = self.docker_client.api.inspect_image(image_id) - logging.debug(("Command failed in container {}: ".format(container_id))) + logging.debug("Command failed in container %s: ", container_id) pprint.pprint(container_info) logging.debug("") - logging.debug( - ("Container {} uses image {}: ".format(container_id, image_id)) - ) + logging.debug("Container %s uses image %s: ", container_id, image_id) pprint.pprint(image_info) logging.debug("") - message = 'Cmd "{}" failed in container {}. Return code {}. Output: {}'.format( - " ".join(cmd), container_id, exit_code, output + message = ( + f'Cmd "{" ".join(cmd)}" failed in container {container_id}. ' + f"Return code {exit_code}. 
Output: {output}" ) if nothrow: logging.debug(message) @@ -2347,22 +2352,14 @@ class ClickHouseCluster: self.print_all_docker_pieces() self.rabbitmq_ip = self.get_instance_ip(self.rabbitmq_host) - start = time.time() - while time.time() - start < timeout: - try: - if check_rabbitmq_is_available( - self.rabbitmq_docker_id, self.rabbitmq_cookie - ): - logging.debug("RabbitMQ is available") - if enable_consistent_hash_plugin( - self.rabbitmq_docker_id, self.rabbitmq_cookie - ): - logging.debug("RabbitMQ consistent hash plugin is available") - return True - time.sleep(0.5) - except Exception as ex: - logging.debug("Can't connect to RabbitMQ " + str(ex)) - time.sleep(0.5) + try: + if check_rabbitmq_is_available( + self.rabbitmq_docker_id, self.rabbitmq_cookie, timeout + ): + logging.debug("RabbitMQ is available") + return True + except Exception as ex: + logging.debug("RabbitMQ await_startup failed", exc_info=True) try: with open(os.path.join(self.rabbitmq_dir, "docker.log"), "w+") as f: @@ -2390,39 +2387,35 @@ class ClickHouseCluster: def wait_zookeeper_secure_to_start(self, timeout=20): logging.debug("Wait ZooKeeper Secure to start") - start = time.time() - while time.time() - start < timeout: - try: - for instance in ["zoo1", "zoo2", "zoo3"]: - conn = self.get_kazoo_client(instance) - conn.get_children("/") - conn.stop() - logging.debug("All instances of ZooKeeper Secure started") - return - except Exception as ex: - logging.debug("Can't connect to ZooKeeper secure " + str(ex)) - time.sleep(0.5) + nodes = ["zoo1", "zoo2", "zoo3"] + self.wait_zookeeper_nodes_to_start(nodes, timeout) - raise Exception("Cannot wait ZooKeeper secure container") - - def wait_zookeeper_to_start(self, timeout=180): + def wait_zookeeper_to_start(self, timeout: float = 180) -> None: logging.debug("Wait ZooKeeper to start") + nodes = ["zoo1", "zoo2", "zoo3"] + self.wait_zookeeper_nodes_to_start(nodes, timeout) + + def wait_zookeeper_nodes_to_start( + self, nodes: List[str], timeout: float = 60 + ) -> None: start = time.time() + err = Exception("") while time.time() - start < timeout: try: - for instance in ["zoo1", "zoo2", "zoo3"]: - conn = self.get_kazoo_client(instance) + for node in nodes: + conn = self.get_kazoo_client(node) conn.get_children("/") conn.stop() - logging.debug("All instances of ZooKeeper started") + logging.debug("All instances of ZooKeeper started: %s", nodes) return except Exception as ex: - logging.debug(f"Can't connect to ZooKeeper {instance}: {ex}") + logging.debug("Can't connect to ZooKeeper %s: %s", node, ex) + err = ex time.sleep(0.5) raise Exception( "Cannot wait ZooKeeper container (probably it's a `iptables-nft` issue, you may try to `sudo iptables -P FORWARD ACCEPT`)" - ) + ) from err def make_hdfs_api(self, timeout=180, kerberized=False): if kerberized: @@ -3367,7 +3360,7 @@ class ClickHouseInstance: self.name = name self.base_cmd = cluster.base_cmd self.docker_id = cluster.get_instance_docker_id(self.name) - self.cluster = cluster + self.cluster = cluster # type: ClickHouseCluster self.hostname = hostname if hostname is not None else self.name self.external_dirs = external_dirs @@ -3978,7 +3971,13 @@ class ClickHouseInstance: self.stop_clickhouse(stop_start_wait_sec, kill) self.start_clickhouse(stop_start_wait_sec) - def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs): + def exec_in_container( + self, + cmd: Sequence[str], + detach: bool = False, + nothrow: bool = False, + **kwargs: Any, + ) -> str: return self.cluster.exec_in_container( self.docker_id, cmd, detach, 
nothrow, **kwargs ) diff --git a/tests/integration/helpers/keeper_utils.py b/tests/integration/helpers/keeper_utils.py index ae57c09825a..43d996fc048 100644 --- a/tests/integration/helpers/keeper_utils.py +++ b/tests/integration/helpers/keeper_utils.py @@ -1,11 +1,13 @@ import contextlib import io +import logging import re import select import socket import subprocess import time -import typing as tp +from os import path as p +from typing import Iterable, List, Optional, Sequence, Union from kazoo.client import KazooClient @@ -23,7 +25,7 @@ ss_established = [ ] -def get_active_zk_connections(node: ClickHouseInstance) -> tp.List[str]: +def get_active_zk_connections(node: ClickHouseInstance) -> List[str]: return ( str(node.exec_in_container(ss_established, privileged=True, user="root")) .strip() @@ -41,6 +43,7 @@ def get_zookeeper_which_node_connected_to(node: ClickHouseInstance) -> str: assert ( len(result) == 1 ), "ClickHouse must be connected only to one Zookeeper at a time" + assert isinstance(result[0], str) return result[0] @@ -118,8 +121,10 @@ class KeeperClient(object): in str(e) and retry_count < connection_tries ): - print( - f"Got exception while connecting to Keeper: {e}\nWill reconnect, reconnect count = {retry_count}" + logging.debug( + "Got exception while connecting to Keeper: %s\nWill reconnect, reconnect count = %s", + e, + retry_count, ) time.sleep(1) else: @@ -169,12 +174,12 @@ class KeeperClient(object): def get(self, path: str, timeout: float = 60.0) -> str: return self.execute_query(f"get '{path}'", timeout) - def set(self, path: str, value: str, version: tp.Optional[int] = None) -> None: + def set(self, path: str, value: str, version: Optional[int] = None) -> None: self.execute_query( f"set '{path}' '{value}' {version if version is not None else ''}" ) - def rm(self, path: str, version: tp.Optional[int] = None) -> None: + def rm(self, path: str, version: Optional[int] = None) -> None: self.execute_query(f"rm '{path}' {version if version is not None else ''}") def exists(self, path: str, timeout: float = 60.0) -> bool: @@ -208,9 +213,9 @@ class KeeperClient(object): def reconfig( self, - joining: tp.Optional[str], - leaving: tp.Optional[str], - new_members: tp.Optional[str], + joining: Optional[str], + leaving: Optional[str], + new_members: Optional[str], timeout: float = 60.0, ) -> str: if bool(joining) + bool(leaving) + bool(new_members) != 1: @@ -236,7 +241,7 @@ class KeeperClient(object): @classmethod @contextlib.contextmanager def from_cluster( - cls, cluster: ClickHouseCluster, keeper_node: str, port: tp.Optional[int] = None + cls, cluster: ClickHouseCluster, keeper_node: str, port: Optional[int] = None ) -> "KeeperClient": client = cls( cluster.server_bin_path, @@ -353,3 +358,22 @@ def wait_configs_equal(left_config: str, right_zk: KeeperClient, timeout: float f"timeout while checking nodes configs to get equal. 
" f"Left: {left_config}, right: {right_config}" ) + + +def replace_zookeeper_config( + nodes: Union[Sequence[ClickHouseInstance], ClickHouseInstance], new_config: str +) -> None: + if not isinstance(nodes, Sequence): + nodes = (nodes,) + for node in nodes: + node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) + node.query("SYSTEM RELOAD CONFIG") + + +def reset_zookeeper_config( + nodes: Union[Sequence[ClickHouseInstance], ClickHouseInstance], + file_path: str = p.join(p.dirname(p.realpath(__file__)), "zookeeper_config.xml"), +) -> None: + """Resets the keeper config to default or to a given path on the disk""" + with open(file_path, "r", encoding="utf-8") as cf: + replace_zookeeper_config(nodes, cf.read()) diff --git a/tests/integration/test_keeper_four_word_command/test.py b/tests/integration/test_keeper_four_word_command/test.py index 6d6f3c9a8ad..c163ad434ae 100644 --- a/tests/integration/test_keeper_four_word_command/test.py +++ b/tests/integration/test_keeper_four_word_command/test.py @@ -344,7 +344,7 @@ def test_cmd_srvr(started_cluster): assert result["Received"] == "10" assert result["Sent"] == "10" assert int(result["Connections"]) == 1 - assert int(result["Zxid"], 16) > 10 + assert int(result["Zxid"], 16) >= 10 assert result["Mode"] == "leader" assert result["Node count"] == "14" diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper1.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper1.xml index 1e57d42016d..d2f805ea0f8 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper1.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper1.xml @@ -1,5 +1,6 @@ + 0 9181 1 /var/lib/clickhouse/coordination/log @@ -9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper2.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper2.xml index 98422b41c9b..4d344004705 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper2.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper2.xml @@ -1,5 +1,6 @@ + 0 9181 2 /var/lib/clickhouse/coordination/log @@ -9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper3.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper3.xml index 43800bd2dfb..5da8c2bf606 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper3.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper3.xml @@ -1,5 +1,6 @@ + 0 9181 3 /var/lib/clickhouse/coordination/log @@ -9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_1.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_1.xml index 0d7544f9a5b..da2f99f702e 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_1.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_1.xml @@ -1,5 +1,6 @@ + 0 9181 1 /var/lib/clickhouse/coordination/log @@ -9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_2.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_2.xml index 65feae85e3e..7fd35fab0be 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_2.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_2.xml @@ -1,5 +1,6 @@ + 0 9181 2 /var/lib/clickhouse/coordination/log @@ 
-9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_4.xml b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_4.xml index 2499de4fe86..c8041eef5de 100644 --- a/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_4.xml +++ b/tests/integration/test_keeper_nodes_move/configs/enable_keeper_node4_4.xml @@ -1,5 +1,6 @@ + 0 9181 4 /var/lib/clickhouse/coordination/log @@ -9,6 +10,7 @@ 5000 10000 trace + 5 diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index cfadc27a2b5..63ce23bc21c 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -670,6 +670,7 @@ def test_python_client(started_cluster): cursor.execute("INSERT INTO table1 VALUES (1), (4)") cursor.execute("SELECT * FROM table1 ORDER BY a") assert cursor.fetchall() == [{"a": 1}, {"a": 1}, {"a": 3}, {"a": 4}] + cursor.execute("DROP DATABASE x") def test_golang_client(started_cluster, golang_container): diff --git a/tests/integration/test_read_only_table/test.py b/tests/integration/test_read_only_table/test.py index cf5f82a5b1e..030b3ed0b16 100644 --- a/tests/integration/test_read_only_table/test.py +++ b/tests/integration/test_read_only_table/test.py @@ -69,3 +69,5 @@ def test_restart_zookeeper(start_cluster): retry_count=10, sleep_time=1, ) + # restore the cluster state + cluster.start_zookeeper_nodes([node1_zk]) diff --git a/tests/integration/test_reload_zookeeper/test.py b/tests/integration/test_reload_zookeeper/test.py index 78e997da9e6..cabefffee8f 100644 --- a/tests/integration/test_reload_zookeeper/test.py +++ b/tests/integration/test_reload_zookeeper/test.py @@ -1,13 +1,18 @@ -import os -import time +from os import path as p import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster -from helpers.keeper_utils import get_active_zk_connections +from helpers.keeper_utils import ( + get_active_zk_connections, + replace_zookeeper_config, + reset_zookeeper_config, +) from helpers.test_tools import assert_eq_with_retry +from helpers.utility import random_string +default_zk_config = p.join(p.dirname(p.realpath(__file__)), "configs/zookeeper.xml") cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml") node = cluster.add_instance("node", with_zookeeper=True) @@ -16,14 +21,6 @@ node = cluster.add_instance("node", with_zookeeper=True) def start_cluster(): try: cluster.start() - node.query( - """ - CREATE TABLE test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard1/test/test_table', '1') - PARTITION BY toYYYYMM(date) - ORDER BY id - """ - ) yield cluster finally: @@ -31,19 +28,15 @@ def start_cluster(): def test_reload_zookeeper(start_cluster): - def wait_zookeeper_node_to_start(zk_nodes, timeout=60): - start = time.time() - while time.time() - start < timeout: - try: - for instance in zk_nodes: - conn = start_cluster.get_kazoo_client(instance) - conn.get_children("/") - print("All instances of ZooKeeper started") - return - except Exception as ex: - print(("Can't connect to ZooKeeper " + str(ex))) - time.sleep(0.5) - + # random is used for flaky tests, where ZK is not fast enough to clear the node + node.query( + f""" + CREATE TABLE test_table(date Date, id UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard1/{random_string(7)}/test_table', '1') + PARTITION BY toYYYYMM(date) + ORDER BY id + """ + ) 
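As the comment above says, the table now lives under a randomized ZooKeeper path so that a slow cleanup from a previous run cannot collide with it. The patch imports `random_string` from `helpers.utility` without showing it; a plausible sketch of such a helper, labeled as an assumption:

```python
import random
import string

def random_string(length: int) -> str:
    # Assumed shape of helpers.utility.random_string: a short alphanumeric
    # suffix that gives every run its own /clickhouse/tables/... path.
    return "".join(random.choices(string.ascii_lowercase + string.digits, k=length))

assert len(random_string(7)) == 7
```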
node.query( "INSERT INTO test_table(date, id) select today(), number FROM numbers(1000)" ) @@ -60,8 +53,7 @@ def test_reload_zookeeper(start_cluster): """ - node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node.query("SYSTEM RELOAD CONFIG") + replace_zookeeper_config(node, new_config) ## config reloads, but can still work assert_eq_with_retry( node, "SELECT COUNT() FROM test_table", "1000", retry_count=120, sleep_time=0.5 @@ -78,7 +70,7 @@ def test_reload_zookeeper(start_cluster): ## start zoo2, zoo3, table will be readonly too, because it only connect to zoo1 cluster.start_zookeeper_nodes(["zoo2", "zoo3"]) - wait_zookeeper_node_to_start(["zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo2", "zoo3"]) node.query("SELECT COUNT() FROM test_table") with pytest.raises(QueryRuntimeException): node.query( @@ -98,8 +90,7 @@ def test_reload_zookeeper(start_cluster): """ - node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node.query("SYSTEM RELOAD CONFIG") + replace_zookeeper_config(node, new_config) active_zk_connections = get_active_zk_connections(node) assert ( @@ -114,3 +105,8 @@ def test_reload_zookeeper(start_cluster): assert ( len(active_zk_connections) == 1 ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) + # Reset cluster state + cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo1", "zoo2", "zoo3"]) + reset_zookeeper_config(node, default_zk_config) + node.query("DROP TABLE test_table") diff --git a/tests/integration/test_remove_stale_moving_parts/test.py b/tests/integration/test_remove_stale_moving_parts/test.py index e8aa6c8a8fe..7218ef55a40 100644 --- a/tests/integration/test_remove_stale_moving_parts/test.py +++ b/tests/integration/test_remove_stale_moving_parts/test.py @@ -70,20 +70,6 @@ def wait_part_is_stuck(node, table_moving_path, moving_part): time.sleep(1) -def wait_zookeeper_node_to_start(zk_nodes, timeout=60): - start = time.time() - while time.time() - start < timeout: - try: - for instance in zk_nodes: - conn = cluster.get_kazoo_client(instance) - conn.get_children("/") - print("All instances of ZooKeeper started") - return - except Exception as ex: - print(("Can't connect to ZooKeeper " + str(ex))) - time.sleep(0.5) - - def test_remove_stale_moving_parts_without_zookeeper(started_cluster): ch1.query(f"CREATE DATABASE IF NOT EXISTS {DATABASE_NAME}") @@ -113,7 +99,7 @@ def test_remove_stale_moving_parts_without_zookeeper(started_cluster): assert exec(ch1, "ls", table_moving_path).strip() == "" cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) - wait_zookeeper_node_to_start(["zoo1", "zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo1", "zoo2", "zoo3"]) q(ch1, "SYSTEM START MOVES") q(ch1, f"DROP TABLE test_remove") diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index 20e2dbca7dd..6fd337cf214 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -962,8 +962,8 @@ def test_recover_staled_replica(started_cluster): def test_recover_staled_replica_many_mvs(started_cluster): - main_node.query("DROP DATABASE IF EXISTS recover_mvs") - dummy_node.query("DROP DATABASE IF EXISTS recover_mvs") + main_node.query("DROP DATABASE IF EXISTS recover_mvs SYNC") + dummy_node.query("DROP DATABASE IF EXISTS recover_mvs SYNC") main_node.query_with_retry( "CREATE DATABASE IF NOT EXISTS 
recover_mvs ENGINE = Replicated('/clickhouse/databases/recover_mvs', 'shard1', 'replica1');" @@ -1104,8 +1104,8 @@ def test_recover_staled_replica_many_mvs(started_cluster): query = "SELECT name FROM system.tables WHERE database='recover_mvs' ORDER BY name" assert main_node.query(query) == dummy_node.query(query) - main_node.query("DROP DATABASE IF EXISTS recover_mvs") - dummy_node.query("DROP DATABASE IF EXISTS recover_mvs") + main_node.query("DROP DATABASE IF EXISTS recover_mvs SYNC") + dummy_node.query("DROP DATABASE IF EXISTS recover_mvs SYNC") def test_startup_without_zk(started_cluster): @@ -1124,8 +1124,10 @@ def test_startup_without_zk(started_cluster): main_node.query("INSERT INTO startup.rmt VALUES (42)") with PartitionManager() as pm: - pm.drop_instance_zk_connections(main_node) - main_node.restart_clickhouse(stop_start_wait_sec=60) + pm.drop_instance_zk_connections( + main_node, action="REJECT --reject-with tcp-reset" + ) + main_node.restart_clickhouse(stop_start_wait_sec=120) assert main_node.query("SELECT (*,).1 FROM startup.rmt") == "42\n" # we need to wait until the table is not readonly @@ -1220,6 +1222,11 @@ def test_sync_replica(started_cluster): def test_force_synchronous_settings(started_cluster): + main_node.query("DROP DATABASE IF EXISTS test_force_synchronous_settings SYNC") + dummy_node.query("DROP DATABASE IF EXISTS test_force_synchronous_settings SYNC") + snapshotting_node.query( + "DROP DATABASE IF EXISTS test_force_synchronous_settings SYNC" + ) main_node.query( "CREATE DATABASE test_force_synchronous_settings ENGINE = Replicated('/clickhouse/databases/test2', 'shard1', 'replica1');" ) @@ -1284,8 +1291,8 @@ def test_force_synchronous_settings(started_cluster): def test_recover_digest_mismatch(started_cluster): - main_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch") - dummy_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch") + main_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch SYNC") + dummy_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch SYNC") main_node.query( "CREATE DATABASE recover_digest_mismatch ENGINE = Replicated('/clickhouse/databases/recover_digest_mismatch', 'shard1', 'replica1');" @@ -1330,15 +1337,16 @@ def test_recover_digest_mismatch(started_cluster): dummy_node.start_clickhouse() assert_eq_with_retry(dummy_node, query, expected) - main_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch") - dummy_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch") + main_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch SYNC") + dummy_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch SYNC") print("Everything Okay") def test_replicated_table_structure_alter(started_cluster): - main_node.query("DROP DATABASE IF EXISTS table_structure") - dummy_node.query("DROP DATABASE IF EXISTS table_structure") + main_node.query("DROP DATABASE IF EXISTS table_structure SYNC") + dummy_node.query("DROP DATABASE IF EXISTS table_structure SYNC") + competing_node.query("DROP DATABASE IF EXISTS table_structure SYNC") main_node.query( "CREATE DATABASE table_structure ENGINE = Replicated('/clickhouse/databases/table_structure', 'shard1', 'replica1');" @@ -1440,8 +1448,8 @@ def test_modify_comment(started_cluster): def test_table_metadata_corruption(started_cluster): - main_node.query("DROP DATABASE IF EXISTS table_metadata_corruption") - dummy_node.query("DROP DATABASE IF EXISTS table_metadata_corruption") + main_node.query("DROP DATABASE IF EXISTS table_metadata_corruption SYNC") + 
dummy_node.query("DROP DATABASE IF EXISTS table_metadata_corruption SYNC") main_node.query( "CREATE DATABASE table_metadata_corruption ENGINE = Replicated('/clickhouse/databases/table_metadata_corruption', 'shard1', 'replica1');" @@ -1479,13 +1487,18 @@ def test_table_metadata_corruption(started_cluster): dummy_node.start_clickhouse() assert_eq_with_retry(dummy_node, query, expected) - main_node.query("DROP DATABASE IF EXISTS table_metadata_corruption") - dummy_node.query("DROP DATABASE IF EXISTS table_metadata_corruption") + main_node.query("DROP DATABASE IF EXISTS table_metadata_corruption SYNC") + dummy_node.query("DROP DATABASE IF EXISTS table_metadata_corruption SYNC") def test_auto_recovery(started_cluster): - dummy_node.query("DROP DATABASE IF EXISTS auto_recovery") - bad_settings_node.query("DROP DATABASE IF EXISTS auto_recovery") + dummy_node.query("DROP DATABASE IF EXISTS auto_recovery SYNC") + bad_settings_node.query( + "DROP DATABASE IF EXISTS auto_recovery", + settings={ + "implicit_transaction": 0, + }, + ) dummy_node.query( "CREATE DATABASE auto_recovery ENGINE = Replicated('/clickhouse/databases/auto_recovery', 'shard1', 'replica1');" @@ -1532,8 +1545,8 @@ def test_auto_recovery(started_cluster): def test_all_groups_cluster(started_cluster): - dummy_node.query("DROP DATABASE IF EXISTS db_cluster") - bad_settings_node.query("DROP DATABASE IF EXISTS db_cluster") + dummy_node.query("DROP DATABASE IF EXISTS db_cluster SYNC") + bad_settings_node.query("DROP DATABASE IF EXISTS db_cluster SYNC") dummy_node.query( "CREATE DATABASE db_cluster ENGINE = Replicated('/clickhouse/databases/all_groups_cluster', 'shard1', 'replica1');" ) diff --git a/tests/integration/test_replicated_user_defined_functions/test.py b/tests/integration/test_replicated_user_defined_functions/test.py index aba507a569c..2b877efee9d 100644 --- a/tests/integration/test_replicated_user_defined_functions/test.py +++ b/tests/integration/test_replicated_user_defined_functions/test.py @@ -1,17 +1,19 @@ import inspect -import os.path -import time from contextlib import nullcontext as does_not_raise +from os import path as p import pytest from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster -from helpers.keeper_utils import get_active_zk_connections +from helpers.keeper_utils import ( + get_active_zk_connections, + replace_zookeeper_config, + reset_zookeeper_config, +) from helpers.test_tools import TSV, assert_eq_with_retry -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - +default_zk_config = p.join(p.dirname(p.realpath(__file__)), "configs/zookeeper.xml") cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml") node1 = cluster.add_instance( @@ -40,32 +42,6 @@ def started_cluster(): cluster.shutdown() -def wait_zookeeper_node_to_start(zk_nodes, timeout=60): - start = time.time() - while time.time() - start < timeout: - try: - for instance in zk_nodes: - conn = cluster.get_kazoo_client(instance) - conn.get_children("/") - print("All instances of ZooKeeper started") - return - except Exception as ex: - print(("Can't connect to ZooKeeper " + str(ex))) - time.sleep(0.5) - - -def replace_zookeeper_config(new_config): - node1.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node2.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node1.query("SYSTEM RELOAD CONFIG") - node2.query("SYSTEM RELOAD CONFIG") - - -def revert_zookeeper_config(): - with open(os.path.join(SCRIPT_DIR, 
"configs/zookeeper.xml"), "r") as f: - replace_zookeeper_config(f.read()) - - def test_create_and_drop(): node1.query("CREATE FUNCTION f1 AS (x, y) -> x + y") assert node1.query("SELECT f1(12, 3)") == "15\n" @@ -192,6 +168,7 @@ def test_reload_zookeeper(): # remove zoo2, zoo3 from configs replace_zookeeper_config( + (node1, node2), inspect.cleandoc( """ @@ -204,7 +181,7 @@ def test_reload_zookeeper(): """ - ) + ), ) # config reloads, but can still work @@ -228,7 +205,7 @@ def test_reload_zookeeper(): # start zoo2, zoo3, user-defined functions will be readonly too, because it only connect to zoo1 cluster.start_zookeeper_nodes(["zoo2", "zoo3"]) - wait_zookeeper_node_to_start(["zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo2", "zoo3"]) assert node2.query( "SELECT name FROM system.functions WHERE name IN ['f1', 'f2', 'f3'] ORDER BY name" ) == TSV(["f1", "f2"]) @@ -238,6 +215,7 @@ def test_reload_zookeeper(): # set config to zoo2, server will be normal replace_zookeeper_config( + (node1, node2), inspect.cleandoc( """ @@ -250,7 +228,7 @@ def test_reload_zookeeper(): """ - ) + ), ) active_zk_connections = get_active_zk_connections(node1) @@ -278,7 +256,7 @@ def test_reload_zookeeper(): # switch to the original version of zookeeper config cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) - revert_zookeeper_config() + reset_zookeeper_config((node1, node2), default_zk_config) # Start without ZooKeeper must be possible, user-defined functions will be loaded after connecting to ZooKeeper. @@ -295,7 +273,7 @@ def test_start_without_zookeeper(): ) cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) - wait_zookeeper_node_to_start(["zoo1", "zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo1", "zoo2", "zoo3"]) assert_eq_with_retry( node2, diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py index 92d6b7b614e..ed524ce0118 100644 --- a/tests/integration/test_replicated_users/test.py +++ b/tests/integration/test_replicated_users/test.py @@ -1,13 +1,19 @@ import inspect import time from dataclasses import dataclass +from os import path as p import pytest from helpers.cluster import ClickHouseCluster -from helpers.keeper_utils import get_active_zk_connections +from helpers.keeper_utils import ( + get_active_zk_connections, + replace_zookeeper_config, + reset_zookeeper_config, +) from helpers.test_tools import TSV, assert_eq_with_retry +default_zk_config = p.join(p.dirname(p.realpath(__file__)), "configs/zookeeper.xml") cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml") node1 = cluster.add_instance( @@ -171,25 +177,6 @@ def test_rename_replicated(started_cluster, entity): # ReplicatedAccessStorage must be able to continue working after reloading ZooKeeper. 
def test_reload_zookeeper(started_cluster): - def wait_zookeeper_node_to_start(zk_nodes, timeout=60): - start = time.time() - while time.time() - start < timeout: - try: - for instance in zk_nodes: - conn = cluster.get_kazoo_client(instance) - conn.get_children("/") - print("All instances of ZooKeeper started") - return - except Exception as ex: - print(("Can't connect to ZooKeeper " + str(ex))) - time.sleep(0.5) - - def replace_zookeeper_config(new_config): - node1.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node2.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) - node1.query("SYSTEM RELOAD CONFIG") - node2.query("SYSTEM RELOAD CONFIG") - node1.query("CREATE USER u1") assert_eq_with_retry( node2, "SELECT name FROM system.users WHERE name ='u1'", "u1\n" @@ -197,6 +184,7 @@ def test_reload_zookeeper(started_cluster): ## remove zoo2, zoo3 from configs replace_zookeeper_config( + (node1, node2), """ @@ -207,7 +195,7 @@ def test_reload_zookeeper(started_cluster): 2000 -""" +""", ) ## config reloads, but can still work @@ -227,7 +215,7 @@ def test_reload_zookeeper(started_cluster): ## start zoo2, zoo3, users will be readonly too, because it only connect to zoo1 cluster.start_zookeeper_nodes(["zoo2", "zoo3"]) - wait_zookeeper_node_to_start(["zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo2", "zoo3"]) assert node2.query( "SELECT name FROM system.users WHERE name IN ['u1', 'u2'] ORDER BY name" ) == TSV(["u1", "u2"]) @@ -235,6 +223,7 @@ def test_reload_zookeeper(started_cluster): ## set config to zoo2, server will be normal replace_zookeeper_config( + (node1, node2), """ @@ -245,7 +234,7 @@ def test_reload_zookeeper(started_cluster): 2000 -""" +""", ) active_zk_connections = get_active_zk_connections(node1) @@ -264,3 +253,9 @@ def test_reload_zookeeper(started_cluster): assert ( len(active_zk_connections) == 1 ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) + + # Restore the test state + node1.query("DROP USER u1, u2, u3") + cluster.start_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) + cluster.wait_zookeeper_nodes_to_start(["zoo1", "zoo2", "zoo3"]) + reset_zookeeper_config((node1, node2), default_zk_config) diff --git a/tests/integration/test_startup_scripts/configs/config.d/startup_scripts.xml b/tests/integration/test_startup_scripts/configs/config.d/startup_scripts.xml index e8a711a926a..21d2865efe8 100644 --- a/tests/integration/test_startup_scripts/configs/config.d/startup_scripts.xml +++ b/tests/integration/test_startup_scripts/configs/config.d/startup_scripts.xml @@ -13,5 +13,13 @@ <query>SELECT * FROM system.query_log LIMIT 1</query> + <scripts> + <query>SELECT 1 SETTINGS skip_unavailable_shards = 1</query> + <condition>SELECT 1;</condition> + </scripts> + <scripts> + <condition>SELECT 1 SETTINGS skip_unavailable_shards = 1</condition> + <query>SELECT 1;</query> + </scripts> diff --git a/tests/integration/test_startup_scripts/test.py b/tests/integration/test_startup_scripts/test.py index 43a871a6fc5..3146db12082 100644 --- a/tests/integration/test_startup_scripts/test.py +++ b/tests/integration/test_startup_scripts/test.py @@ -16,6 +16,12 @@ def test_startup_scripts(): try: cluster.start() assert node.query("SHOW TABLES") == "TestTable\n" + assert ( + node.query( + "SELECT value, changed FROM system.settings WHERE name = 'skip_unavailable_shards'" + ) + == "0\t0\n" + ) finally: cluster.shutdown() diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index c235e5dad89..62019120140 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++
b/tests/integration/test_storage_s3_queue/test.py @@ -1087,7 +1087,7 @@ def test_drop_table(started_cluster): started_cluster, files_path, files_to_generate, start_ind=0, row_num=100000 ) create_mv(node, table_name, dst_table_name) - node.wait_for_log_line(f"Reading from file: test_drop_data") + node.wait_for_log_line(f"rows from file: test_drop_data") node.query(f"DROP TABLE {table_name} SYNC") assert node.contains_in_log( f"StorageS3Queue (default.{table_name}): Table is being dropped" diff --git a/tests/output.txt b/tests/output.txt deleted file mode 100644 index 14cf08aac3b..00000000000 --- a/tests/output.txt +++ /dev/null @@ -1,1218 +0,0 @@ -Using queries from 'queries' directory -Connecting to ClickHouse server... OK - -Running 1 stateless tests (MainProcess). - -02240_protobuflist_format_persons: [ FAIL ] - return code: 1, result: - -a7522158-3d41-4b77-ad69-6c598ee55c49 Ivan Petrov male 1980-12-29 png +74951234567\0 1 2019-01-05 18:45:00 38 capricorn ['Yesterday','Flowers'] [255,0,0] Moscow [55.753215,37.622504] 3.14 214.1 0.1 5.8 17060000000 ['meter','centimeter','kilometer'] [1,0.01,1000] 500 [501,502] -c694ad8a-f714-4ea3-907d-fd54fb25d9b5 Natalia Sokolova female 1992-03-08 jpg \N 0 \N 26 pisces [] [100,200,50] Plymouth [50.403724,-4.142123] 3.14159 \N 0.007 5.4 -20000000000000 [] [] \N [] -a7da1aa6-f425-4789-8947-b034786ed374 Vasily Sidorov male 1995-07-28 bmp +442012345678 1 2018-12-30 00:00:00 23 leo ['Sunny'] [250,244,10] Murmansk [68.970682,33.074981] 3.14159265358979 100000000000 800 -3.2 154400000 ['pound'] [16] 503 [] - -Schema 02240_protobuflist1_format_persons:Person - -Binary representation: -00000000 ba 04 0a f4 01 0a 24 61 37 35 32 32 31 35 38 2d |......$a7522158-| -00000010 33 64 34 31 2d 34 62 37 37 2d 61 64 36 39 2d 36 |3d41-4b77-ad69-6| -00000020 63 35 39 38 65 65 35 35 63 34 39 12 04 49 76 61 |c598ee55c49..Iva| -00000030 6e 1a 06 50 65 74 72 6f 76 20 01 28 af 1f 32 03 |n..Petrov .(..2.| -00000040 70 6e 67 3a 0d 2b 37 34 39 35 31 32 33 34 35 36 |png:.+7495123456| -00000050 37 00 40 01 4d fc d0 30 5c 50 26 58 09 62 09 59 |7.@.M..0\P&X.b.Y| -00000060 65 73 74 65 72 64 61 79 62 07 46 6c 6f 77 65 72 |esterdayb.Flower| -00000070 73 6a 04 ff 01 00 00 72 06 4d 6f 73 63 6f 77 7a |sj.....r.Moscowz| -00000080 08 4b 03 5f 42 72 7d 16 42 81 01 1f 85 eb 51 b8 |.K._Br}.B.....Q.| -00000090 1e 09 40 89 01 33 33 33 33 33 c3 6a 40 95 01 cd |..@..33333.j@...| -000000a0 cc cc 3d 9d 01 9a 99 b9 40 a0 01 80 c4 d7 8d 7f |..=.....@.......| -000000b0 aa 01 0c 0a 05 6d 65 74 65 72 15 00 00 80 3f aa |.....meter....?.| -000000c0 01 11 0a 0a 63 65 6e 74 69 6d 65 74 65 72 15 0a |....centimeter..| -000000d0 d7 23 3c aa 01 10 0a 09 6b 69 6c 6f 6d 65 74 65 |.#<.....kilomete| -000000e0 72 15 00 00 7a 44 b2 01 10 0a 0e a2 06 0b 0a 09 |r...zD..........| -000000f0 08 f4 03 12 04 f5 03 f6 03 0a 7e 0a 24 63 36 39 |..........~.$c69| -00000100 34 61 64 38 61 2d 66 37 31 34 2d 34 65 61 33 2d |4ad8a-f714-4ea3-| -00000110 39 30 37 64 2d 66 64 35 34 66 62 32 35 64 39 62 |907d-fd54fb25d9b| -00000120 35 12 07 4e 61 74 61 6c 69 61 1a 08 53 6f 6b 6f |5..Natalia..Soko| -00000130 6c 6f 76 61 28 a6 3f 32 03 6a 70 67 50 1a 58 0b |lova(.?2.jpgP.X.| -00000140 6a 04 64 c8 01 32 72 08 50 6c 79 6d 6f 75 74 68 |j.d..2r.Plymouth| -00000150 7a 08 6a 9d 49 42 46 8c 84 c0 81 01 6e 86 1b f0 |z.j.IBF.....n...| -00000160 f9 21 09 40 95 01 42 60 e5 3b 9d 01 cd cc ac 40 |.!.@..B`.;.....@| -00000170 a0 01 ff ff a9 ce 93 8c 09 0a c0 01 0a 24 61 37 |.............$a7| -00000180 64 61 31 61 61 36 2d 66 34 32 35 2d 34 37 38 39 
|da1aa6-f425-4789| -00000190 2d 38 39 34 37 2d 62 30 33 34 37 38 36 65 64 33 |-8947-b034786ed3| -000001a0 37 34 12 06 56 61 73 69 6c 79 1a 07 53 69 64 6f |74..Vasily..Sido| -000001b0 72 6f 76 20 01 28 fb 48 32 03 62 6d 70 3a 0d 2b |rov .(.H2.bmp:.+| -000001c0 34 34 32 30 31 32 33 34 35 36 37 38 40 01 4d 50 |442012345678@.MP| -000001d0 e0 27 5c 50 17 58 04 62 05 53 75 6e 6e 79 6a 05 |.'\P.X.b.Sunnyj.| -000001e0 fa 01 f4 01 0a 72 08 4d 75 72 6d 61 6e 73 6b 7a |.....r.Murmanskz| -000001f0 08 fd f0 89 42 c8 4c 04 42 81 01 11 2d 44 54 fb |....B.L.B...-DT.| -00000200 21 09 40 89 01 00 00 00 e8 76 48 37 42 95 01 00 |!.@......vH7B...| -00000210 00 48 44 9d 01 cd cc 4c c0 a0 01 80 d4 9f 93 01 |.HD....L........| -00000220 aa 01 0c 0a 05 70 6f 75 6e 64 15 00 00 80 41 b2 |.....pound....A.| -00000230 01 0a 0a 08 a2 06 05 0a 03 08 f7 03 |............| -0000023c - -MESSAGE #1 AT 0x00000005 -uuid: "a7522158-3d41-4b77-ad69-6c598ee55c49" -name: "Ivan" -surname: "Petrov" -gender: male -birthDate: 4015 -photo: "png" -phoneNumber: "+74951234567\000" -isOnline: true -visitTime: 1546703100 -age: 38 -zodiacSign: capricorn -songs: "Yesterday" -songs: "Flowers" -color: 255 -color: 0 -color: 0 -hometown: "Moscow" -location: 55.7532158 -location: 37.6225052 -pi: 3.14 -lotteryWin: 214.1 -someRatio: 0.1 -temperature: 5.8 -randomBigNumber: 17060000000 -measureUnits { - unit: "meter" - coef: 1 -} -measureUnits { - unit: "centimeter" - coef: 0.01 -} -measureUnits { - unit: "kilometer" - coef: 1000 -} -nestiness { - a { - b { - c { - d: 500 - e: 501 - e: 502 - } - } - } -} -MESSAGE #2 AT 0x000000FB -uuid: "c694ad8a-f714-4ea3-907d-fd54fb25d9b5" -name: "Natalia" -surname: "Sokolova" -birthDate: 8102 -photo: "jpg" -age: 26 -zodiacSign: pisces -color: 100 -color: 200 -color: 50 -hometown: "Plymouth" -location: 50.4037247 -location: -4.14212322 -pi: 3.14159 -someRatio: 0.007 -temperature: 5.4 -randomBigNumber: -20000000000000 -MESSAGE #3 AT 0x0000017C -uuid: "a7da1aa6-f425-4789-8947-b034786ed374" -name: "Vasily" -surname: "Sidorov" -gender: male -birthDate: 9339 -photo: "bmp" -phoneNumber: "+442012345678" -isOnline: true -visitTime: 1546117200 -age: 23 -zodiacSign: leo -songs: "Sunny" -color: 250 -color: 244 -color: 10 -hometown: "Murmansk" -location: 68.9706802 -location: 33.0749817 -pi: 3.14159265358979 -lotteryWin: 100000000000 -someRatio: 800 -temperature: -3.2 -randomBigNumber: 154400000 -measureUnits { - unit: "pound" - coef: 16 -} -nestiness { - a { - b { - c { - d: 503 - } - } - } -} - -Binary representation is as expected - -Roundtrip: -a7522158-3d41-4b77-ad69-6c598ee55c49 Ivan Petrov male 1980-12-29 png +74951234567\0 1 2019-01-05 18:45:00 38 capricorn ['Yesterday','Flowers'] [255,0,0] Moscow [55.753216,37.622504] 3.14 214.1 0.1 5.8 17060000000 ['meter','centimeter','kilometer'] [1,0.01,1000] 500 [501,502] -c694ad8a-f714-4ea3-907d-fd54fb25d9b5 Natalia Sokolova female 1992-03-08 jpg \N 0 \N 26 pisces [] [100,200,50] Plymouth [50.403724,-4.142123] 3.14159 \N 0.007 5.4 -20000000000000 [] [] \N [] -a7da1aa6-f425-4789-8947-b034786ed374 Vasily Sidorov male 1995-07-28 bmp +442012345678 1 2018-12-30 00:00:00 23 leo ['Sunny'] [250,244,10] Murmansk [68.97068,33.074982] 3.14159265358979 100000000000 800 -3.2 154400000 ['pound'] [16] 503 [] - -Schema 02240_protobuflist2_format_persons:AltPerson - -Binary representation: -00000000 f4 03 0a c4 01 08 01 12 04 49 76 61 6e 28 87 a8 |.........Ivan(..| -00000010 c4 9b 97 02 52 06 50 65 74 72 6f 76 72 0c 00 00 |....R.Petrovr...| -00000020 7f 43 00 00 00 00 00 00 00 00 79 fc d0 30 5c 00 
|.C........y..0\.| -00000030 00 00 00 c8 02 0a c2 05 0c 00 00 80 3f 0a d7 23 |............?..#| -00000040 3c 00 00 7a 44 9a 06 05 6d 65 74 65 72 9a 06 0a |<..zD...meter...| -00000050 63 65 6e 74 69 6d 65 74 65 72 9a 06 09 6b 69 6c |centimeter...kil| -00000060 6f 6d 65 74 65 72 a1 06 00 00 00 a0 99 99 b9 3f |ometer.........?| -00000070 a8 06 37 a8 06 25 bd 06 c3 f5 48 40 fa 06 02 33 |..7..%....H@...3| -00000080 38 90 08 c6 09 e1 08 00 f1 da f8 03 00 00 00 b0 |8...............| -00000090 09 af 1f d0 0c d6 01 e2 12 24 61 37 35 32 32 31 |.........$a75221| -000000a0 35 38 2d 33 64 34 31 2d 34 62 37 37 2d 61 64 36 |58-3d41-4b77-ad6| -000000b0 39 2d 36 63 35 39 38 65 65 35 35 63 34 39 a0 38 |9-6c598ee55c49.8| -000000c0 f4 03 aa 38 04 f5 03 f6 03 0a 84 01 12 07 4e 61 |...8..........Na| -000000d0 74 61 6c 69 61 52 08 53 6f 6b 6f 6c 6f 76 61 72 |taliaR.Sokolovar| -000000e0 0c 00 00 c8 42 00 00 48 43 00 00 48 42 c8 02 0a |....B..HC..HB...| -000000f0 a1 06 00 00 00 40 08 ac 7c 3f a8 06 32 a8 06 fc |.....@..|?..2...| -00000100 ff ff ff ff ff ff ff ff 01 b0 06 01 bd 06 d0 0f |................| -00000110 49 40 fa 06 02 32 36 90 08 db 01 e1 08 00 c0 1a |I@...26.........| -00000120 63 cf ed ff ff b0 09 a6 3f e2 12 24 63 36 39 34 |c.......?..$c694| -00000130 61 64 38 61 2d 66 37 31 34 2d 34 65 61 33 2d 39 |ad8a-f714-4ea3-9| -00000140 30 37 64 2d 66 64 35 34 66 62 32 35 64 39 62 35 |07d-fd54fb25d9b5| -00000150 0a a3 01 08 01 12 06 56 61 73 69 6c 79 28 ce ca |.......Vasily(..| -00000160 f4 cf ee 0c 52 07 53 69 64 6f 72 6f 76 72 0c 00 |....R.Sidorovr..| -00000170 00 7a 43 00 00 74 43 00 00 20 41 79 50 e0 27 5c |.zC..tC.. AyP.'\| -00000180 00 00 00 00 c8 02 05 c2 05 04 00 00 80 41 9a 06 |.............A..| -00000190 05 70 6f 75 6e 64 a1 06 00 00 00 00 00 00 89 40 |.pound.........@| -000001a0 a8 06 44 a8 06 21 bd 06 db 0f 49 40 fa 06 02 32 |..D..!....I@...2| -000001b0 33 90 08 d3 05 e1 08 00 f5 33 09 00 00 00 00 b0 |3........3......| -000001c0 09 fb 48 d0 0c 80 d0 db c3 f4 02 e2 12 24 61 37 |..H..........$a7| -000001d0 64 61 31 61 61 36 2d 66 34 32 35 2d 34 37 38 39 |da1aa6-f425-4789| -000001e0 2d 38 39 34 37 2d 62 30 33 34 37 38 36 65 64 33 |-8947-b034786ed3| -000001f0 37 34 a0 38 f7 03 |74.8..| -000001f6 - -MESSAGE #1 AT 0x00000005 -isOnline: online -name: "Ivan" -phoneNumber: 74951234567 -surname: "Petrov" -color: 255 -color: 0 -color: 0 -visitTime: 1546703100 -temperature: 5 -measureUnits_coef: 1 -measureUnits_coef: 0.01 -measureUnits_coef: 1000 -measureUnits_unit: "meter" -measureUnits_unit: "centimeter" -measureUnits_unit: "kilometer" -someRatio: 0.10000000149011612 -location: 55 -location: 37 -pi: 3.14 -age: "38" -zodiacSign: 1222 -randomBigNumber: 17060000000 -birthDate: 4015 -lotteryWin: 214 -uuid: "a7522158-3d41-4b77-ad69-6c598ee55c49" -nestiness_a_b_c_d: 500 -nestiness_a_b_c_e: 501 -nestiness_a_b_c_e: 502 -MESSAGE #2 AT 0x000000CC -name: "Natalia" -surname: "Sokolova" -color: 100 -color: 200 -color: 50 -temperature: 5 -someRatio: 0.0070000002160668373 -location: 50 -location: -4 -gender: female -pi: 3.14159 -age: "26" -zodiacSign: 219 -randomBigNumber: -20000000000000 -birthDate: 8102 -uuid: "c694ad8a-f714-4ea3-907d-fd54fb25d9b5" -MESSAGE #3 AT 0x00000153 -isOnline: online -name: "Vasily" -phoneNumber: 442012345678 -surname: "Sidorov" -color: 250 -color: 244 -color: 10 -visitTime: 1546117200 -temperature: -3 -measureUnits_coef: 16 -measureUnits_unit: "pound" -someRatio: 800 -location: 68 -location: 33 -pi: 3.14159274 -age: "23" -zodiacSign: 723 -randomBigNumber: 154400000 -birthDate: 9339 -lotteryWin: 
100000000000 -uuid: "a7da1aa6-f425-4789-8947-b034786ed374" -nestiness_a_b_c_d: 503 - -Binary representation is as expected - -Roundtrip: -a7522158-3d41-4b77-ad69-6c598ee55c49 Ivan Petrov male 1980-12-29 \N 74951234567\0\0 1 2019-01-05 18:45:00 38 capricorn [] [255,0,0] [55,37] 3.140000104904175 214 0.1 5 17060000000 ['meter','centimeter','kilometer'] [1,0.01,1000] 500 [501,502] -c694ad8a-f714-4ea3-907d-fd54fb25d9b5 Natalia Sokolova female 1992-03-08 \N \N 0 \N 26 pisces [] [100,200,50] [50,-4] 3.141590118408203 \N 0.007 5 -20000000000000 [] [] \N [] -a7da1aa6-f425-4789-8947-b034786ed374 Vasily Sidorov male 1995-07-28 \N 442012345678\0 1 2018-12-30 00:00:00 23 leo [] [250,244,10] [68,33] 3.1415927410125732 100000000000 800 -3 154400000 ['pound'] [16] 503 [] - -Schema 02240_protobuflist3_format_persons:StrPerson as ProtobufList - -Binary representation: -00000000 e4 05 0a a6 02 0a 24 61 37 35 32 32 31 35 38 2d |......$a7522158-| -00000010 33 64 34 31 2d 34 62 37 37 2d 61 64 36 39 2d 36 |3d41-4b77-ad69-6| -00000020 63 35 39 38 65 65 35 35 63 34 39 12 04 49 76 61 |c598ee55c49..Iva| -00000030 6e 1a 06 50 65 74 72 6f 76 22 04 6d 61 6c 65 2a |n..Petrov".male*| -00000040 0a 31 39 38 30 2d 31 32 2d 32 39 3a 0d 2b 37 34 |.1980-12-29:.+74| -00000050 39 35 31 32 33 34 35 36 37 00 42 01 31 4a 13 32 |951234567.B.1J.2| -00000060 30 31 39 2d 30 31 2d 30 35 20 31 38 3a 34 35 3a |019-01-05 18:45:| -00000070 30 30 52 02 33 38 5a 09 63 61 70 72 69 63 6f 72 |00R.38Z.capricor| -00000080 6e 62 09 59 65 73 74 65 72 64 61 79 62 07 46 6c |nb.Yesterdayb.Fl| -00000090 6f 77 65 72 73 6a 03 32 35 35 6a 01 30 6a 01 30 |owersj.255j.0j.0| -000000a0 72 06 4d 6f 73 63 6f 77 7a 09 35 35 2e 37 35 33 |r.Moscowz.55.753| -000000b0 32 31 35 7a 09 33 37 2e 36 32 32 35 30 34 82 01 |215z.37.622504..| -000000c0 04 33 2e 31 34 8a 01 05 32 31 34 2e 31 92 01 03 |.3.14...214.1...| -000000d0 30 2e 31 9a 01 03 35 2e 38 a2 01 0b 31 37 30 36 |0.1...5.8...1706| -000000e0 30 30 30 30 30 30 30 aa 01 2d 0a 05 6d 65 74 65 |0000000..-..mete| -000000f0 72 0a 0a 63 65 6e 74 69 6d 65 74 65 72 0a 09 6b |r..centimeter..k| -00000100 69 6c 6f 6d 65 74 65 72 12 01 31 12 04 30 2e 30 |ilometer..1..0.0| -00000110 31 12 04 31 30 30 30 b2 01 11 0a 0f 0a 03 35 30 |1..1000.......50| -00000120 30 12 03 35 30 31 12 03 35 30 32 0a b4 01 0a 24 |0..501..502....$| -00000130 63 36 39 34 61 64 38 61 2d 66 37 31 34 2d 34 65 |c694ad8a-f714-4e| -00000140 61 33 2d 39 30 37 64 2d 66 64 35 34 66 62 32 35 |a3-907d-fd54fb25| -00000150 64 39 62 35 12 07 4e 61 74 61 6c 69 61 1a 08 53 |d9b5..Natalia..S| -00000160 6f 6b 6f 6c 6f 76 61 22 06 66 65 6d 61 6c 65 2a |okolova".female*| -00000170 0a 31 39 39 32 2d 30 33 2d 30 38 42 01 30 52 02 |.1992-03-08B.0R.| -00000180 32 36 5a 06 70 69 73 63 65 73 6a 03 31 30 30 6a |26Z.piscesj.100j| -00000190 03 32 30 30 6a 02 35 30 72 08 50 6c 79 6d 6f 75 |.200j.50r.Plymou| -000001a0 74 68 7a 09 35 30 2e 34 30 33 37 32 34 7a 09 2d |thz.50.403724z.-| -000001b0 34 2e 31 34 32 31 32 33 82 01 07 33 2e 31 34 31 |4.142123...3.141| -000001c0 35 39 92 01 05 30 2e 30 30 37 9a 01 03 35 2e 34 |59...0.007...5.4| -000001d0 a2 01 0f 2d 32 30 30 30 30 30 30 30 30 30 30 30 |...-200000000000| -000001e0 30 30 0a 81 02 0a 24 61 37 64 61 31 61 61 36 2d |00....$a7da1aa6-| -000001f0 66 34 32 35 2d 34 37 38 39 2d 38 39 34 37 2d 62 |f425-4789-8947-b| -00000200 30 33 34 37 38 36 65 64 33 37 34 12 06 56 61 73 |034786ed374..Vas| -00000210 69 6c 79 1a 07 53 69 64 6f 72 6f 76 22 04 6d 61 |ily..Sidorov".ma| -00000220 6c 65 2a 0a 31 39 39 35 2d 30 37 2d 32 38 3a 0d 
|le*.1995-07-28:.| -00000230 2b 34 34 32 30 31 32 33 34 35 36 37 38 42 01 31 |+442012345678B.1| -00000240 4a 13 32 30 31 38 2d 31 32 2d 33 30 20 30 30 3a |J.2018-12-30 00:| -00000250 30 30 3a 30 30 52 02 32 33 5a 03 6c 65 6f 62 05 |00:00R.23Z.leob.| -00000260 53 75 6e 6e 79 6a 03 32 35 30 6a 03 32 34 34 6a |Sunnyj.250j.244j| -00000270 02 31 30 72 08 4d 75 72 6d 61 6e 73 6b 7a 09 36 |.10r.Murmanskz.6| -00000280 38 2e 39 37 30 36 38 32 7a 09 33 33 2e 30 37 34 |8.970682z.33.074| -00000290 39 38 31 82 01 10 33 2e 31 34 31 35 39 32 36 35 |981...3.14159265| -000002a0 33 35 38 39 37 39 8a 01 0c 31 30 30 30 30 30 30 |358979...1000000| -000002b0 30 30 30 30 30 92 01 03 38 30 30 9a 01 04 2d 33 |00000...800...-3| -000002c0 2e 32 a2 01 09 31 35 34 34 30 30 30 30 30 aa 01 |.2...154400000..| -000002d0 0b 0a 05 70 6f 75 6e 64 12 02 31 36 b2 01 07 0a |...pound..16....| -000002e0 05 0a 03 35 30 33 |...503| -000002e6 - -MESSAGE #1 AT 0x00000005 -uuid: "a7522158-3d41-4b77-ad69-6c598ee55c49" -name: "Ivan" -surname: "Petrov" -gender: "male" -birthDate: "1980-12-29" -phoneNumber: "+74951234567\000" -isOnline: "1" -visitTime: "2019-01-05 18:45:00" -age: "38" -zodiacSign: "capricorn" -songs: "Yesterday" -songs: "Flowers" -color: "255" -color: "0" -color: "0" -hometown: "Moscow" -location: "55.753215" -location: "37.622504" -pi: "3.14" -lotteryWin: "214.1" -someRatio: "0.1" -temperature: "5.8" -randomBigNumber: "17060000000" -measureUnits { - unit: "meter" - unit: "centimeter" - unit: "kilometer" - coef: "1" - coef: "0.01" - coef: "1000" -} -nestiness_a { - b_c { - d: "500" - e: "501" - e: "502" - } -} -MESSAGE #2 AT 0x0000012E -uuid: "c694ad8a-f714-4ea3-907d-fd54fb25d9b5" -name: "Natalia" -surname: "Sokolova" -gender: "female" -birthDate: "1992-03-08" -isOnline: "0" -age: "26" -zodiacSign: "pisces" -color: "100" -color: "200" -color: "50" -hometown: "Plymouth" -location: "50.403724" -location: "-4.142123" -pi: "3.14159" -someRatio: "0.007" -temperature: "5.4" -randomBigNumber: "-20000000000000" -MESSAGE #3 AT 0x000001E5 -uuid: "a7da1aa6-f425-4789-8947-b034786ed374" -name: "Vasily" -surname: "Sidorov" -gender: "male" -birthDate: "1995-07-28" -phoneNumber: "+442012345678" -isOnline: "1" -visitTime: "2018-12-30 00:00:00" -age: "23" -zodiacSign: "leo" -songs: "Sunny" -color: "250" -color: "244" -color: "10" -hometown: "Murmansk" -location: "68.970682" -location: "33.074981" -pi: "3.14159265358979" -lotteryWin: "100000000000" -someRatio: "800" -temperature: "-3.2" -randomBigNumber: "154400000" -measureUnits { - unit: "pound" - coef: "16" -} -nestiness_a { - b_c { - d: "503" - } -} - -Binary representation is as expected -Roundtrip: -a7522158-3d41-4b77-ad69-6c598ee55c49 Ivan Petrov male 1980-12-29 \N +74951234567\0 1 2019-01-05 18:45:00 38 capricorn ['Yesterday','Flowers'] [255,0,0] Moscow [55.753215,37.622504] 3.14 214.1 0.1 5.8 17060000000 ['meter','centimeter','kilometer'] [1,0.01,1000] 500 [501,502] -c694ad8a-f714-4ea3-907d-fd54fb25d9b5 Natalia Sokolova female 1992-03-08 \N \N 0 \N 26 pisces [] [100,200,50] Plymouth [50.403724,-4.142123] 3.14159 \N 0.007 5.4 -20000000000000 [] [] \N [] -a7da1aa6-f425-4789-8947-b034786ed374 Vasily Sidorov male 1995-07-28 \N +442012345678 1 2018-12-30 00:00:00 23 leo ['Sunny'] [250,244,10] Murmansk [68.970682,33.074981] 3.14159265358979 100000000000 800 -3.2 154400000 ['pound'] [16] 503 [] - -Schema 02240_protobuf_format_syntax2:Syntax2Person - -Binary representation: -00000000 bb 04 0a ef 01 0a 24 61 37 35 32 32 31 35 38 2d |......$a7522158-| -00000010 33 64 34 31 2d 34 62 37 37 2d 
61 64 36 39 2d 36 |3d41-4b77-ad69-6| -00000020 63 35 39 38 65 65 35 35 63 34 39 12 04 49 76 61 |c598ee55c49..Iva| -00000030 6e 1a 06 50 65 74 72 6f 76 20 01 28 af 1f 32 03 |n..Petrov .(..2.| -00000040 70 6e 67 3a 0d 2b 37 34 39 35 31 32 33 34 35 36 |png:.+7495123456| -00000050 37 00 40 01 4d fc d0 30 5c 50 26 58 09 62 09 59 |7.@.M..0\P&X.b.Y| -00000060 65 73 74 65 72 64 61 79 62 07 46 6c 6f 77 65 72 |esterdayb.Flower| -00000070 73 6a 04 ff 01 00 00 72 06 4d 6f 73 63 6f 77 7a |sj.....r.Moscowz| -00000080 08 4b 03 5f 42 72 7d 16 42 81 01 1f 85 eb 51 b8 |.K._Br}.B.....Q.| -00000090 1e 09 40 89 01 33 33 33 33 33 c3 6a 40 95 01 cd |..@..33333.j@...| -000000a0 cc cc 3d 9d 01 9a 99 b9 40 a0 01 80 c4 d7 8d 7f |..=.....@.......| -000000b0 ab 01 0a 0c 00 00 80 3f 0a d7 23 3c 00 00 7a 44 |.......?..#<..zD| -000000c0 12 05 6d 65 74 65 72 12 0a 63 65 6e 74 69 6d 65 |..meter..centime| -000000d0 74 65 72 12 09 6b 69 6c 6f 6d 65 74 65 72 ac 01 |ter..kilometer..| -000000e0 b3 01 0b a2 06 0b 0b 08 f4 03 12 04 f5 03 f6 03 |................| -000000f0 0c 0c b4 01 0a 80 01 0a 24 63 36 39 34 61 64 38 |........$c694ad8| -00000100 61 2d 66 37 31 34 2d 34 65 61 33 2d 39 30 37 64 |a-f714-4ea3-907d| -00000110 2d 66 64 35 34 66 62 32 35 64 39 62 35 12 07 4e |-fd54fb25d9b5..N| -00000120 61 74 61 6c 69 61 1a 08 53 6f 6b 6f 6c 6f 76 61 |atalia..Sokolova| -00000130 20 00 28 a6 3f 32 03 6a 70 67 50 1a 58 0b 6a 04 | .(.?2.jpgP.X.j.| -00000140 64 c8 01 32 72 08 50 6c 79 6d 6f 75 74 68 7a 08 |d..2r.Plymouthz.| -00000150 6a 9d 49 42 46 8c 84 c0 81 01 6e 86 1b f0 f9 21 |j.IBF.....n....!| -00000160 09 40 95 01 42 60 e5 3b 9d 01 cd cc ac 40 a0 01 |.@..B`.;.....@..| -00000170 ff ff a9 ce 93 8c 09 0a c3 01 0a 24 61 37 64 61 |...........$a7da| -00000180 31 61 61 36 2d 66 34 32 35 2d 34 37 38 39 2d 38 |1aa6-f425-4789-8| -00000190 39 34 37 2d 62 30 33 34 37 38 36 65 64 33 37 34 |947-b034786ed374| -000001a0 12 06 56 61 73 69 6c 79 1a 07 53 69 64 6f 72 6f |..Vasily..Sidoro| -000001b0 76 20 01 28 fb 48 32 03 62 6d 70 3a 0d 2b 34 34 |v .(.H2.bmp:.+44| -000001c0 32 30 31 32 33 34 35 36 37 38 40 01 4d 50 e0 27 |2012345678@.MP.'| -000001d0 5c 50 17 58 04 62 05 53 75 6e 6e 79 6a 05 fa 01 |\P.X.b.Sunnyj...| -000001e0 f4 01 0a 72 08 4d 75 72 6d 61 6e 73 6b 7a 08 fd |...r.Murmanskz..| -000001f0 f0 89 42 c8 4c 04 42 81 01 11 2d 44 54 fb 21 09 |..B.L.B...-DT.!.| -00000200 40 89 01 00 00 00 e8 76 48 37 42 95 01 00 00 48 |@......vH7B....H| -00000210 44 9d 01 cd cc 4c c0 a0 01 80 d4 9f 93 01 ab 01 |D....L..........| -00000220 0a 04 00 00 80 41 12 05 70 6f 75 6e 64 ac 01 b3 |.....A..pound...| -00000230 01 0b a2 06 05 0b 08 f7 03 0c 0c b4 01 |.............| -0000023d - -MESSAGE #1 AT 0x00000005 -uuid: "a7522158-3d41-4b77-ad69-6c598ee55c49" -name: "Ivan" -surname: "Petrov" -gender: male -birthDate: 4015 -photo: "png" -phoneNumber: "+74951234567\000" -isOnline: true -visitTime: 1546703100 -age: 38 -zodiacSign: capricorn -songs: "Yesterday" -songs: "Flowers" -color: 255 -color: 0 -color: 0 -hometown: "Moscow" -location: 55.7532158 -location: 37.6225052 -pi: 3.14 -lotteryWin: 214.1 -someRatio: 0.1 -temperature: 5.8 -randomBigNumber: 17060000000 -MeasureUnits { - coef: 1 - coef: 0.01 - coef: 1000 - unit: "meter" - unit: "centimeter" - unit: "kilometer" -} -Nestiness { - A { - b { - C { - d: 500 - e: 501 - e: 502 - } - } - } -} -MESSAGE #2 AT 0x000000F7 -uuid: "c694ad8a-f714-4ea3-907d-fd54fb25d9b5" -name: "Natalia" -surname: "Sokolova" -gender: female -birthDate: 8102 -photo: "jpg" -age: 26 -zodiacSign: pisces -color: 100 -color: 200 -color: 50 -hometown: 
"Plymouth" -location: 50.4037247 -location: -4.14212322 -pi: 3.14159 -someRatio: 0.007 -temperature: 5.4 -randomBigNumber: -20000000000000 -MESSAGE #3 AT 0x0000017A -uuid: "a7da1aa6-f425-4789-8947-b034786ed374" -name: "Vasily" -surname: "Sidorov" -gender: male -birthDate: 9339 -photo: "bmp" -phoneNumber: "+442012345678" -isOnline: true -visitTime: 1546117200 -age: 23 -zodiacSign: leo -songs: "Sunny" -color: 250 -color: 244 -color: 10 -hometown: "Murmansk" -location: 68.9706802 -location: 33.0749817 -pi: 3.14159265358979 -lotteryWin: 100000000000 -someRatio: 800 -temperature: -3.2 -randomBigNumber: 154400000 -MeasureUnits { - coef: 16 - unit: "pound" -} -Nestiness { - A { - b { - C { - d: 503 - } - } - } -} - -Binary representation differs from the expected one (listed below): -00000000 be 04 0a f1 01 0a 24 61 37 35 32 32 31 35 38 2d |......$a7522158-| -00000010 33 64 34 31 2d 34 62 37 37 2d 61 64 36 39 2d 36 |3d41-4b77-ad69-6| -00000020 63 35 39 38 65 65 35 35 63 34 39 12 04 49 76 61 |c598ee55c49..Iva| -00000030 6e 1a 06 50 65 74 72 6f 76 20 01 28 af 1f 32 03 |n..Petrov .(..2.| -00000040 70 6e 67 3a 0d 2b 37 34 39 35 31 32 33 34 35 36 |png:.+7495123456| -00000050 37 00 40 01 4d fc d0 30 5c 50 26 58 09 62 09 59 |7.@.M..0\P&X.b.Y| -00000060 65 73 74 65 72 64 61 79 62 07 46 6c 6f 77 65 72 |esterdayb.Flower| -00000070 73 68 ff 01 68 00 68 00 72 06 4d 6f 73 63 6f 77 |sh..h.h.r.Moscow| -00000080 7a 08 4b 03 5f 42 72 7d 16 42 81 01 1f 85 eb 51 |z.K._Br}.B.....Q| -00000090 b8 1e 09 40 89 01 33 33 33 33 33 c3 6a 40 95 01 |...@..33333.j@..| -000000a0 cd cc cc 3d 9d 01 9a 99 b9 40 a0 01 80 c4 d7 8d |...=.....@......| -000000b0 7f ab 01 0d 00 00 80 3f 0d 0a d7 23 3c 0d 00 00 |.......?...#<...| -000000c0 7a 44 12 05 6d 65 74 65 72 12 0a 63 65 6e 74 69 |zD..meter..centi| -000000d0 6d 65 74 65 72 12 09 6b 69 6c 6f 6d 65 74 65 72 |meter..kilometer| -000000e0 ac 01 b3 01 0b a2 06 0b 0b 08 f4 03 10 f5 03 10 |................| -000000f0 f6 03 0c 0c b4 01 0a 81 01 0a 24 63 36 39 34 61 |..........$c694a| -00000100 64 38 61 2d 66 37 31 34 2d 34 65 61 33 2d 39 30 |d8a-f714-4ea3-90| -00000110 37 64 2d 66 64 35 34 66 62 32 35 64 39 62 35 12 |7d-fd54fb25d9b5.| -00000120 07 4e 61 74 61 6c 69 61 1a 08 53 6f 6b 6f 6c 6f |.Natalia..Sokolo| -00000130 76 61 20 00 28 a6 3f 32 03 6a 70 67 50 1a 58 0b |va .(.?2.jpgP.X.| -00000140 68 64 68 c8 01 68 32 72 08 50 6c 79 6d 6f 75 74 |hdh..h2r.Plymout| -00000150 68 7a 08 6a 9d 49 42 46 8c 84 c0 81 01 6e 86 1b |hz.j.IBF.....n..| -00000160 f0 f9 21 09 40 95 01 42 60 e5 3b 9d 01 cd cc ac |..!.@..B`.;.....| -00000170 40 a0 01 ff ff a9 ce 93 8c 09 0a c3 01 0a 24 61 |@.............$a| -00000180 37 64 61 31 61 61 36 2d 66 34 32 35 2d 34 37 38 |7da1aa6-f425-478| -00000190 39 2d 38 39 34 37 2d 62 30 33 34 37 38 36 65 64 |9-8947-b034786ed| -000001a0 33 37 34 12 06 56 61 73 69 6c 79 1a 07 53 69 64 |374..Vasily..Sid| -000001b0 6f 72 6f 76 20 01 28 fb 48 32 03 62 6d 70 3a 0d |orov .(.H2.bmp:.| -000001c0 2b 34 34 32 30 31 32 33 34 35 36 37 38 40 01 4d |+442012345678@.M| -000001d0 50 e0 27 5c 50 17 58 04 62 05 53 75 6e 6e 79 68 |P.'\P.X.b.Sunnyh| -000001e0 fa 01 68 f4 01 68 0a 72 08 4d 75 72 6d 61 6e 73 |..h..h.r.Murmans| -000001f0 6b 7a 08 fd f0 89 42 c8 4c 04 42 81 01 11 2d 44 |kz....B.L.B...-D| -00000200 54 fb 21 09 40 89 01 00 00 00 e8 76 48 37 42 95 |T.!.@......vH7B.| -00000210 01 00 00 48 44 9d 01 cd cc 4c c0 a0 01 80 d4 9f |...HD....L......| -00000220 93 01 ab 01 0d 00 00 80 41 12 05 70 6f 75 6e 64 |........A..pound| -00000230 ac 01 b3 01 0b a2 06 05 0b 08 f7 03 0c 0c b4 01 
|................| -00000240 - - -Settings used in the test: --max_insert_threads 0 --group_by_two_level_threshold 963158 --group_by_two_level_threshold_bytes 13149870 --distributed_aggregation_memory_efficient 1 --fsync_metadata 0 --output_format_parallel_formatting 0 --input_format_parallel_parsing 1 --min_chunk_bytes_for_parallel_parsing 18512987 --max_read_buffer_size 685645 --prefer_localhost_replica 0 --max_block_size 36563 --max_threads 2 --optimize_or_like_chain 1 --optimize_read_in_order 1 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 74 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 26213853 --min_compress_block_size 2850779 --max_compress_block_size 2496283 --use_uncompressed_cache 0 --min_bytes_to_use_direct_io
10737418240 --min_bytes_to_use_mmap_io 10737418240 --local_filesystem_read_method pread --remote_filesystem_read_method read --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 10 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 1 --allow_prefetched_read_pool_for_remote_filesystem 0 --filesystem_prefetch_max_memory_usage 128Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 1Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 8 --optimize_distinct_in_order 1 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 2897457 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 0 --session_timezone Africa/Juba - -MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 1 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 1041313230 --index_granularity_bytes 7044432 --merge_max_block_size 16869 --index_granularity 27099 --min_bytes_for_wide_part 1073741824 --compress_marks 1 --compress_primary_key 1 --marks_compress_block_size 60638 --primary_key_compress_block_size 64768 --replace_long_file_name_to_hash 1 --max_file_name_length 0 - -Database: test_xjjpx0p6 - -Having 1 errors! 0 tests passed. 0 tests skipped. 20.40 s elapsed (MainProcess). -Won't run stateful tests because test data wasn't loaded. -All tests have finished. 
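A note on the ZooKeeper helper refactor in the integration-test diffs above: the shared implementations that the tests now import from helpers/keeper_utils.py (replace_zookeeper_config, reset_zookeeper_config) and the new cluster.wait_zookeeper_nodes_to_start method are not part of this diff. As a rough sketch only, inferred from the per-test copies deleted above and from the new call sites, which pass a tuple of nodes plus a config string or a default config path, the two config helpers presumably look like this; the exact bodies are an assumption, not code from this PR:

def replace_zookeeper_config(nodes, new_config):
    # Sketch (assumed): push the new ZooKeeper config to every node,
    # then make each server re-read its configuration, mirroring the
    # deleted per-test helpers but generalized to any set of nodes.
    for node in nodes:
        node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config)
    for node in nodes:
        node.query("SYSTEM RELOAD CONFIG")

def reset_zookeeper_config(nodes, default_config_path):
    # Sketch (assumed): restore the original config from the default
    # zookeeper.xml shipped with the test.
    with open(default_config_path) as f:
        replace_zookeeper_config(nodes, f.read())

Centralizing the helpers this way means each test only keeps its own default_zk_config path (the constant added at the top of both test files above), and the restore step at the end of test_reload_zookeeper becomes a single reset_zookeeper_config call.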
diff --git a/tests/performance/scripts/compare.sh b/tests/performance/scripts/compare.sh index c3566c51a16..1af98cd1691 100755 --- a/tests/performance/scripts/compare.sh +++ b/tests/performance/scripts/compare.sh @@ -73,6 +73,7 @@ function configure rm right/config/config.d/text_log.xml ||: # backups disk uses absolute path, and this overlaps between servers, that could lead to errors rm right/config/config.d/backups.xml ||: + rm left/config/config.d/backups.xml ||: cp -rv right/config left ||: # Start a temporary server to rename the tables diff --git a/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.reference b/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.reference new file mode 100644 index 00000000000..685ec07daa3 --- /dev/null +++ b/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.reference @@ -0,0 +1,110 @@ +-------------- Test copy sorting clauses from source table -------------- +CREATE TABLE default.x +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +------------------------------------------------------------------------- +CREATE TABLE default.x_as +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192 +-------------- Test copy sorting clauses from destination table (source table without the same type clauses) -------------- +CREATE TABLE default.x +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PRIMARY KEY (CounterID, EventDate, intHash32(UserID)) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SETTINGS index_granularity = 8192 +------------------------------------------------------------------------- +CREATE TABLE default.x_as +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +PRIMARY KEY (CounterID, EventDate, intHash32(UserID)) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192 +-------------- Test copy sorting clauses from destination table (source table with the same type clauses) -------------- +CREATE TABLE default.x +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +ORDER BY CounterID +SETTINGS index_granularity = 8192 +------------------------------------------------------------------------- +CREATE TABLE default.x_as +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192 +-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is old syntax) -------------- +CREATE TABLE default.x +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) +------------------------------------------------------------------------- +CREATE TABLE 
default.x_as +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192) +-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is new syntax) -------------- +CREATE TABLE default.x +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192 +------------------------------------------------------------------------- +CREATE TABLE default.x_as +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt64 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 8192 diff --git a/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.sql b/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.sql new file mode 100644 index 00000000000..9ecfdbabd49 --- /dev/null +++ b/tests/queries/0_stateless/01056_create_table_as_with_sorting_clauses.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS x; +DROP TABLE IF EXISTS x_as; + +SELECT '-------------- Test copy sorting clauses from source table --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID); +CREATE TABLE x_as AS x ENGINE = MergeTree SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test copy sorting clauses from destination table (source table without the same type clauses) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PRIMARY KEY (CounterID, EventDate, intHash32(UserID)); +CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test copy sorting clauses from destination table (source table with the same type clauses) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree ORDER BY (CounterID); +CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is old syntax) --------------'; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE x (`CounterID` UInt32, `EventDate` 
Date, `UserID` UInt64) ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192); +CREATE TABLE x_as AS x; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is new syntax) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID); +CREATE TABLE x_as AS x ENGINE = MergeTree SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; \ No newline at end of file diff --git a/tests/queries/0_stateless/01278_random_string_utf8.reference b/tests/queries/0_stateless/01278_random_string_utf8.reference index 45efb26db75..36ae0ace76a 100644 --- a/tests/queries/0_stateless/01278_random_string_utf8.reference +++ b/tests/queries/0_stateless/01278_random_string_utf8.reference @@ -2,4 +2,3 @@ String 1 -99 diff --git a/tests/queries/0_stateless/01278_random_string_utf8.sql b/tests/queries/0_stateless/01278_random_string_utf8.sql index 290d6a0c759..c0149dc6f36 100644 --- a/tests/queries/0_stateless/01278_random_string_utf8.sql +++ b/tests/queries/0_stateless/01278_random_string_utf8.sql @@ -1,9 +1,5 @@ --- Tags: no-fasttest --- no-fasttest: upper/lowerUTF8 use ICU - SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT lengthUTF8(randomStringUTF8(100)); SELECT toTypeName(randomStringUTF8(10)); SELECT isValidUTF8(randomStringUTF8(100000)); SELECT randomStringUTF8(0); -SELECT lengthUTF8(lowerUTF8(randomStringUTF8(99))); -- bug #49672: msan assert diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql index 82684f754b6..d3448138396 100644 --- a/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree.sql @@ -103,4 +103,4 @@ SELECT FROM system.projection_parts WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); -DROP TABLE tp; +DROP TABLE tp; \ No newline at end of file diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.reference b/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.reference new file mode 100644 index 00000000000..4913ceae376 --- /dev/null +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.reference @@ -0,0 +1,6 @@ +2 0 +2 1 +2 2 +3 0 +3 1 +3 2 diff --git a/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.sql b/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.sql new file mode 100644 index 00000000000..113b5ce4ba6 --- /dev/null +++ b/tests/queries/0_stateless/03206_projection_merge_special_mergetree_ignore.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'ignore'; + 
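-- With deduplicate_merge_projection_mode = 'ignore', merges neither drop nor
-- rebuild the projection, so projection parts can go stale relative to the
-- deduplicated base data. That divergence is what this test pins down: after
-- OPTIMIZE FINAL, the ReplacingMergeTree base keeps one row per type (sum = 2),
-- while the untouched aggregate projection still reports sum = 3, hence the
-- paired SELECTs below with projections disabled and then enabled.
-- (An assumed alternative when consistency matters, not part of this test:
-- ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'rebuild';)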
+INSERT INTO tp SELECT number%3, 1 FROM numbers(3); +INSERT INTO tp SELECT number%3, 2 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +OPTIMIZE TABLE tp FINAL; + +SET optimize_use_projections = false, force_optimize_projection = false; + +SELECT sum(eventcnt) eventcnt, type +FROM tp +GROUP BY type +ORDER BY eventcnt, type; + +SET optimize_use_projections = true, force_optimize_projection = true; + +SELECT sum(eventcnt) eventcnt, type +FROM tp +GROUP BY type +ORDER BY eventcnt, type; + +DROP TABLE tp; diff --git a/tests/queries/0_stateless/03237_insert_sparse_columns_mem.sh b/tests/queries/0_stateless/03237_insert_sparse_columns_mem.sh index ac682a0f574..af3e1c9fe80 100755 --- a/tests/queries/0_stateless/03237_insert_sparse_columns_mem.sh +++ b/tests/queries/0_stateless/03237_insert_sparse_columns_mem.sh @@ -11,7 +11,9 @@ for i in {1..250}; do table_structure+=", c$i String" done -$CLICKHOUSE_CLIENT --query " +MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --enable_parsing_to_custom_serialization 1" + +$MY_CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS t_insert_mem; DROP TABLE IF EXISTS t_reference; @@ -23,7 +25,7 @@ $CLICKHOUSE_CLIENT --query " filename="test_data_sparse_$CLICKHOUSE_DATABASE.json" -$CLICKHOUSE_CLIENT --query " +$MY_CLICKHOUSE_CLIENT --query " INSERT INTO FUNCTION file('$filename', LineAsString) SELECT format('{{ \"id\": {}, \"c{}\": \"{}\" }}', number, number % 250, hex(number * 1000000)) FROM numbers(30000) SETTINGS engine_file_truncate_on_insert = 1; @@ -34,15 +36,19 @@ $CLICKHOUSE_CLIENT --query " " for _ in {1..4}; do - $CLICKHOUSE_CLIENT --query "INSERT INTO t_reference SELECT * FROM file('$filename', JSONEachRow)" + $MY_CLICKHOUSE_CLIENT --query "INSERT INTO t_reference SELECT * FROM file('$filename', JSONEachRow)" done; -$CLICKHOUSE_CLIENT --enable_parsing_to_custom_serialization 1 --query "INSERT INTO t_insert_mem SELECT * FROM file('$filename', JSONEachRow)" -$CLICKHOUSE_CLIENT --enable_parsing_to_custom_serialization 1 --query "INSERT INTO t_insert_mem SELECT * FROM file('$filename', JSONEachRow)" -$CLICKHOUSE_CLIENT --enable_parsing_to_custom_serialization 1 --query "INSERT INTO t_insert_mem SELECT * FROM s3(s3_conn, filename='$filename', format='JSONEachRow')" -$CLICKHOUSE_CLIENT --query "SELECT * FROM file('$filename', LineAsString) FORMAT LineAsString" | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_insert_mem+FORMAT+JSONEachRow&enable_parsing_to_custom_serialization=1" --data-binary @- +$MY_CLICKHOUSE_CLIENT --query "INSERT INTO t_insert_mem SELECT * FROM file('$filename', JSONEachRow)" +$MY_CLICKHOUSE_CLIENT --query "INSERT INTO t_insert_mem SELECT * FROM file('$filename', JSONEachRow)" -$CLICKHOUSE_CLIENT --query " +$MY_CLICKHOUSE_CLIENT --query "DETACH TABLE t_insert_mem" +$MY_CLICKHOUSE_CLIENT --query "ATTACH TABLE t_insert_mem" + +$MY_CLICKHOUSE_CLIENT --query "INSERT INTO t_insert_mem SELECT * FROM s3(s3_conn, filename='$filename', format='JSONEachRow')" +$MY_CLICKHOUSE_CLIENT --query "SELECT * FROM file('$filename', LineAsString) FORMAT LineAsString" | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_insert_mem+FORMAT+JSONEachRow&enable_parsing_to_custom_serialization=1" --data-binary @- + +$MY_CLICKHOUSE_CLIENT --query " SELECT count() FROM t_insert_mem; SELECT sum(sipHash64(*)) FROM t_insert_mem; SELECT sum(sipHash64(*)) FROM t_reference; @@ -53,7 +59,7 @@ $CLICKHOUSE_CLIENT --query " SYSTEM FLUSH LOGS; - SELECT written_bytes <= 3000000 FROM system.query_log + SELECT written_bytes
<= 10000000 FROM system.query_log WHERE query LIKE 'INSERT INTO t_insert_mem%' AND current_database = '$CLICKHOUSE_DATABASE' AND type = 'QueryFinish' ORDER BY event_time_microseconds; diff --git a/tests/queries/0_stateless/03239_nan_with_fill.reference b/tests/queries/0_stateless/03239_nan_with_fill.reference new file mode 100644 index 00000000000..10c8c316670 --- /dev/null +++ b/tests/queries/0_stateless/03239_nan_with_fill.reference @@ -0,0 +1,3 @@ +nan +nan +nan diff --git a/tests/queries/0_stateless/03239_nan_with_fill.sql b/tests/queries/0_stateless/03239_nan_with_fill.sql new file mode 100644 index 00000000000..69eaf9b0c42 --- /dev/null +++ b/tests/queries/0_stateless/03239_nan_with_fill.sql @@ -0,0 +1,3 @@ +SELECT nan ORDER BY 1 WITH FILL; +SELECT -nan ORDER BY 1 WITH FILL; +SELECT 0./0. ORDER BY 1 WITH FILL; diff --git a/tests/queries/0_stateless/03243_lower_utf8_msan.reference b/tests/queries/0_stateless/03243_lower_utf8_msan.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/03243_lower_utf8_msan.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/03243_lower_utf8_msan.sql b/tests/queries/0_stateless/03243_lower_utf8_msan.sql new file mode 100644 index 00000000000..d147ccc3478 --- /dev/null +++ b/tests/queries/0_stateless/03243_lower_utf8_msan.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +SELECT ignore(lengthUTF8(lowerUTF8(randomStringUTF8(99)))); -- bug #49672: msan assert diff --git a/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.reference b/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.reference new file mode 100644 index 00000000000..8a143e535e2 --- /dev/null +++ b/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.reference @@ -0,0 +1,37 @@ +-- { echoOn } +-- https://github.com/ClickHouse/ClickHouse/issues/68895 +SELECT arrayMax(x -> toFixedString('.', 1), []); +. 
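-- Everything from here on exercises the generic, comparison-based code path of
-- arrayMin/arrayMax, which is taken whenever the (lambda) result type is not a
-- plain number: the empty-array/FixedString case above (issue #68895), negated
-- and overflowing lambdas, tuples, and maps. An assumed further example on the
-- same path: arrayMax(['a', 'c', 'b']) compares strings and would return 'c'.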
+-- https://github.com/ClickHouse/ClickHouse/issues/69600 +SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; +-1 +SELECT arrayMax(x -> toUInt16(-x), [1, 2, 4]) AS res; +65535 +-- https://github.com/ClickHouse/ClickHouse/pull/69640 +SELECT arrayMin(x1 -> (x1 * toNullable(-1)), materialize([1, 2, 3])); +-3 +SELECT arrayMin(x1 -> x1 * -1, [1,2,3]); +-3 +DROP TABLE IF EXISTS test_aggregation_array; +CREATE TABLE test_aggregation_array (x Array(Int)) ENGINE=MergeTree() ORDER by tuple(); +INSERT INTO test_aggregation_array VALUES ([1,2,3,4,5,6]), ([]), ([1,2,3]); +SELECT [arrayMin(x1 -> (x1 * materialize(-1)), [toNullable(toUInt256(0)), materialize(4)])], arrayMin([arrayMin([0])]) FROM test_aggregation_array GROUP BY arrayAvg([1]), [0, toUInt256(8)] WITH CUBE SETTINGS allow_experimental_analyzer = 1; +[-4] 0 +[-4] 0 +[-4] 0 +[-4] 0 +SELECT [arrayMin([3, arrayMin([toUInt128(8)]), 4, 5]), arrayMax([materialize(1)]), arrayMin([arrayMax([1]), 2]), 2], arrayMin([0, toLowCardinality(8)]), 2, arrayMax(x1 -> (x1 * -1), x) FROM test_aggregation_array; +[3,1,1,2] 0 2 -1 +[3,1,1,2] 0 2 0 +[3,1,1,2] 0 2 -1 +select arrayMax(x -> x.1, [(1, 'a'), (0, 'b')]); +1 +select arrayMin(x -> x.2, [(1, 'a'), (0, 'b')]); +a +-- Extra validation of generic arrayMin/arrayMax +WITH [(1,2),(1,3)] AS t SELECT arrayMin(t), arrayMax(t); +(1,2) (1,3) +WITH [map('a', 1, 'b', 2), map('a',1,'b',3)] AS t SELECT arrayMin(t), arrayMax(t); +{'a':1,'b':2} {'a':1,'b':3} +WITH [map('a', 1, 'b', 2, 'c', 10), map('a',1,'b',3, 'c', 0)] AS t SELECT arrayMin(x -> x['c'], t), arrayMax(x -> x['c'], t); +0 10 diff --git a/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.sql b/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.sql new file mode 100644 index 00000000000..2cd052917b2 --- /dev/null +++ b/tests/queries/0_stateless/03247_generic_arrayMin_arrayMax_fixes.sql @@ -0,0 +1,26 @@ +-- { echoOn } +-- https://github.com/ClickHouse/ClickHouse/issues/68895 +SELECT arrayMax(x -> toFixedString('.', 1), []); + +-- https://github.com/ClickHouse/ClickHouse/issues/69600 +SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; +SELECT arrayMax(x -> toUInt16(-x), [1, 2, 4]) AS res; + +-- https://github.com/ClickHouse/ClickHouse/pull/69640 +SELECT arrayMin(x1 -> (x1 * toNullable(-1)), materialize([1, 2, 3])); +SELECT arrayMin(x1 -> x1 * -1, [1,2,3]); + +DROP TABLE IF EXISTS test_aggregation_array; +CREATE TABLE test_aggregation_array (x Array(Int)) ENGINE=MergeTree() ORDER by tuple(); +INSERT INTO test_aggregation_array VALUES ([1,2,3,4,5,6]), ([]), ([1,2,3]); + +SELECT [arrayMin(x1 -> (x1 * materialize(-1)), [toNullable(toUInt256(0)), materialize(4)])], arrayMin([arrayMin([0])]) FROM test_aggregation_array GROUP BY arrayAvg([1]), [0, toUInt256(8)] WITH CUBE SETTINGS allow_experimental_analyzer = 1; +SELECT [arrayMin([3, arrayMin([toUInt128(8)]), 4, 5]), arrayMax([materialize(1)]), arrayMin([arrayMax([1]), 2]), 2], arrayMin([0, toLowCardinality(8)]), 2, arrayMax(x1 -> (x1 * -1), x) FROM test_aggregation_array; + +select arrayMax(x -> x.1, [(1, 'a'), (0, 'b')]); +select arrayMin(x -> x.2, [(1, 'a'), (0, 'b')]); + +-- Extra validation of generic arrayMin/arrayMax +WITH [(1,2),(1,3)] AS t SELECT arrayMin(t), arrayMax(t); +WITH [map('a', 1, 'b', 2), map('a',1,'b',3)] AS t SELECT arrayMin(t), arrayMax(t); +WITH [map('a', 1, 'b', 2, 'c', 10), map('a',1,'b',3, 'c', 0)] AS t SELECT arrayMin(x -> x['c'], t), arrayMax(x -> x['c'], t); diff --git a/tests/queries/0_stateless/03247_materialized_view_select_intersect.reference 
b/tests/queries/0_stateless/03247_materialized_view_select_intersect.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03247_materialized_view_select_intersect.sql b/tests/queries/0_stateless/03247_materialized_view_select_intersect.sql new file mode 100644 index 00000000000..72efac0ce27 --- /dev/null +++ b/tests/queries/0_stateless/03247_materialized_view_select_intersect.sql @@ -0,0 +1 @@ +CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1); --{serverError QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW} diff --git a/tests/result b/tests/result deleted file mode 100644 index b76f44f1e6a..00000000000 --- a/tests/result +++ /dev/null @@ -1,12 +0,0 @@ -Using queries from 'queries' directory -Connecting to ClickHouse server...... OK -Connected to server 24.7.1.1 @ 246f421f2402799fd11b22a608b4d0d497cb8438 chesema-processor-onCancel - -Running 1 stateless tests (MainProcess). - -00993_system_parts_race_condition_drop_zookeeper: [ OK ] - -1 tests passed. 0 tests skipped. 124.59 s elapsed (MainProcess). - -0 tests passed. 0 tests skipped. 0.00 s elapsed (MainProcess). -All tests have finished. diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index fec72709174..5554b916c39 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v24.9.2.42-stable 2024-10-03 v24.9.1.3278-stable 2024-09-26 v24.8.4.13-lts 2024-09-06 v24.8.3.59-lts 2024-09-03
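A closing usage sketch for the precedence rules that 01056_create_table_as_with_sorting_clauses pins down (table names here are hypothetical): clauses written on the new table win, clauses it omits are inherited from the source table, and old-syntax MergeTree definitions are copied verbatim.

CREATE TABLE src (id UInt64, d Date) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY id;
-- No clauses given: dst inherits PARTITION BY and ORDER BY from src.
CREATE TABLE dst AS src ENGINE = MergeTree;
-- Clauses given: they take precedence over whatever src declares.
CREATE TABLE dst2 AS src ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY (id, d);
SHOW CREATE TABLE dst FORMAT TSVRaw;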